diff -pruN 2025.03.1-8/.binder/environment.yml 2025.10.1-1/.binder/environment.yml
--- 2025.03.1-8/.binder/environment.yml	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/.binder/environment.yml	2025-10-10 10:38:05.000000000 +0000
@@ -2,7 +2,7 @@ name: xarray-examples
 channels:
   - conda-forge
 dependencies:
-  - python=3.10
+  - python=3.11
   - boto3
   - bottleneck
   - cartopy
diff -pruN 2025.03.1-8/.git_archival.txt 2025.10.1-1/.git_archival.txt
--- 2025.03.1-8/.git_archival.txt	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/.git_archival.txt	2025-10-10 10:38:05.000000000 +0000
@@ -1,4 +1,4 @@
-node: 2aa2e73c71cf5241abe9802909153dd24da2be00
-node-date: 2025-03-30T20:55:18-06:00
-describe-name: v2025.03.1
-ref-names: tag: v2025.03.1
+node: c131fbf1bb5ab7769728867b70bdf5179f01b40f
+node-date: 2025-10-07T13:23:49-07:00
+describe-name: v2025.10.1
+ref-names: tag: v2025.10.1, stable
diff -pruN 2025.03.1-8/.github/ISSUE_TEMPLATE/bugreport.yml 2025.10.1-1/.github/ISSUE_TEMPLATE/bugreport.yml
--- 2025.03.1-8/.github/ISSUE_TEMPLATE/bugreport.yml	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/.github/ISSUE_TEMPLATE/bugreport.yml	2025-10-10 10:38:05.000000000 +0000
@@ -26,8 +26,35 @@ body:
     attributes:
       label: Minimal Complete Verifiable Example
       description: |
-        Minimal, self-contained copy-pastable example that demonstrates the issue.         This will be automatically formatted into code, so no need for markdown backticks.
+        Minimal, self-contained copy-pastable example that demonstrates the issue.
+
+        Consider listing additional or specific dependencies in [inline script metadata](https://packaging.python.org/en/latest/specifications/inline-script-metadata/#example)
+        so that calling `uv run issue.py` shows the issue when copied into `issue.py`. (not strictly required)
+
+        This will be automatically formatted into code, so no need for markdown backticks.
       render: Python
+      value: |
+        # /// script
+        # requires-python = ">=3.11"
+        # dependencies = [
+        #   "xarray[complete]@git+https://github.com/pydata/xarray.git@main",
+        # ]
+        # ///
+        #
+        # This script automatically imports the development branch of xarray to check for issues.
+        # Please delete this header if you have _not_ tested this script with `uv run`!
+
+        import xarray as xr
+        xr.show_versions()
+        # your reproducer code ...
+
+  - type: textarea
+    id: reproduce
+    attributes:
+      label: Steps to reproduce
+      description:
+    validations:
+      required: false
 
   - type: checkboxes
     id: mvce-checkboxes
diff -pruN 2025.03.1-8/.github/labeler.yml 2025.10.1-1/.github/labeler.yml
--- 2025.03.1-8/.github/labeler.yml	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/.github/labeler.yml	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,116 @@
+Automation:
+  - changed-files:
+      - any-glob-to-any-file:
+          - .github/**
+
+CI:
+  - changed-files:
+      - any-glob-to-any-file:
+          - ci/**
+
+dependencies:
+  - changed-files:
+      - any-glob-to-any-file:
+          - ci/requirements/*
+
+topic-arrays:
+  - changed-files:
+      - any-glob-to-any-file:
+          - xarray/core/duck_array_ops.py
+
+topic-backends:
+  - changed-files:
+      - any-glob-to-any-file:
+          - xarray/backends/**
+
+topic-cftime:
+  - changed-files:
+      - any-glob-to-any-file:
+          - xarray/coding/*time*
+
+topic-CF conventions:
+  - changed-files:
+      - any-glob-to-any-file:
+          - xarray/conventions.py
+
+topic-dask:
+  - changed-files:
+      - any-glob-to-any-file:
+          - xarray/compat/dask*
+          - xarray/core/parallel.py
+
+topic-DataTree:
+  - changed-files:
+      - any-glob-to-any-file:
+          - xarray/core/datatree*
+
+topic-documentation:
+  - all:
+      - changed-files:
+          - any-glob-to-any-file: "doc/**/*"
+          - all-globs-to-all-files: "!doc/whats-new.rst"
+
+topic-groupby:
+  - changed-files:
+      - any-glob-to-any-file:
+          - xarray/core/groupby.py
+
+topic-html-repr:
+  - changed-files:
+      - any-glob-to-any-file:
+          - xarray/core/formatting_html.py
+
+topic-hypothesis:
+  - changed-files:
+      - any-glob-to-any-file:
+          - properties/**
+          - xarray/testing/strategies.py
+
+topic-indexing:
+  - changed-files:
+      - any-glob-to-any-file:
+          - xarray/core/indexes.py
+          - xarray/core/indexing.py
+
+topic-NamedArray:
+  - changed-files:
+      - any-glob-to-any-file:
+          - xarray/namedarray/*
+
+topic-performance:
+  - changed-files:
+      - any-glob-to-any-file:
+          - asv_bench/benchmarks/**
+
+topic-plotting:
+  - changed-files:
+      - any-glob-to-any-file:
+          - xarray/plot/*
+          - xarray/plot/**/*
+
+topic-rolling:
+  - changed-files:
+      - any-glob-to-any-file:
+          - xarray/computation/rolling.py
+          - xarray/computation/rolling_exp.py
+
+topic-testing:
+  - changed-files:
+      - any-glob-to-any-file:
+          - conftest.py
+          - xarray/testing/*
+
+topic-typing:
+  - changed-files:
+      - any-glob-to-any-file:
+          - xarray/core/types.py
+
+topic-zarr:
+  - changed-files:
+      - any-glob-to-any-file:
+          - xarray/backends/zarr.py
+
+io:
+  - changed-files:
+      - any-glob-to-any-file:
+          - xarray/backends/**
diff -pruN 2025.03.1-8/.github/release.yml 2025.10.1-1/.github/release.yml
--- 2025.03.1-8/.github/release.yml	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/.github/release.yml	2025-10-10 10:38:05.000000000 +0000
@@ -1,5 +1,5 @@
 changelog:
   exclude:
     authors:
-      - dependabot
-      - pre-commit-ci
+      - dependabot[bot]
+      - pre-commit-ci[bot]
diff -pruN 2025.03.1-8/.github/workflows/benchmarks-last-release.yml 2025.10.1-1/.github/workflows/benchmarks-last-release.yml
--- 2025.03.1-8/.github/workflows/benchmarks-last-release.yml	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/.github/workflows/benchmarks-last-release.yml	2025-10-10 10:38:05.000000000 +0000
@@ -9,7 +9,7 @@ on:
 jobs:
   benchmark:
     name: Linux
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     env:
       ASV_DIR: "./asv_bench"
       CONDA_ENV_FILE: ci/requirements/environment.yml
@@ -17,7 +17,7 @@ jobs:
     steps:
       # We need the full repo to avoid this issue
       # https://github.com/actions/checkout/issues/23
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           fetch-depth: 0
 
diff -pruN 2025.03.1-8/.github/workflows/benchmarks.yml 2025.10.1-1/.github/workflows/benchmarks.yml
--- 2025.03.1-8/.github/workflows/benchmarks.yml	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/.github/workflows/benchmarks.yml	2025-10-10 10:38:05.000000000 +0000
@@ -10,17 +10,17 @@ env:
 
 jobs:
   benchmark:
-    if: ${{ contains( github.event.pull_request.labels.*.name, 'run-benchmark') && github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch' }}
+    if: ${{ contains( github.event.pull_request.labels.*.name, 'run-benchmark') && github.event_name == 'pull_request' || contains( github.event.pull_request.labels.*.name, 'topic-performance') && github.event_name == 'pull_request' || github.event_name == 'workflow_dispatch' }}
     name: Linux
-    runs-on: ubuntu-20.04
+    runs-on: ubuntu-latest
     env:
       ASV_DIR: "./asv_bench"
-      CONDA_ENV_FILE: ci/requirements/environment.yml
+      CONDA_ENV_FILE: ci/requirements/environment-benchmark.yml
 
     steps:
       # We need the full repo to avoid this issue
       # https://github.com/actions/checkout/issues/23
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           fetch-depth: 0
 
@@ -29,7 +29,7 @@ jobs:
         with:
           micromamba-version: "1.5.10-0"
           environment-file: ${{env.CONDA_ENV_FILE}}
-          environment-name: xarray-tests
+          environment-name: xarray-benchmark
           cache-environment: true
           cache-environment-key: "${{runner.os}}-${{runner.arch}}-py${{env.PYTHON_VERSION}}-${{env.TODAY}}-${{hashFiles(env.CONDA_ENV_FILE)}}-benchmark"
           # add "build" because of https://github.com/airspeed-velocity/asv/issues/1385
diff -pruN 2025.03.1-8/.github/workflows/ci-additional.yaml 2025.10.1-1/.github/workflows/ci-additional.yaml
--- 2025.03.1-8/.github/workflows/ci-additional.yaml	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/.github/workflows/ci-additional.yaml	2025-10-10 10:38:05.000000000 +0000
@@ -25,7 +25,7 @@ jobs:
     outputs:
       triggered: ${{ steps.detect-trigger.outputs.trigger-found }}
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           fetch-depth: 2
       - uses: xarray-contrib/ci-trigger@v1
@@ -46,7 +46,7 @@ jobs:
       CONDA_ENV_FILE: ci/requirements/environment.yml
       PYTHON_VERSION: "3.12"
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           fetch-depth: 0 # Fetch all history for all branches and tags.
 
@@ -92,7 +92,7 @@ jobs:
       PYTHON_VERSION: "3.12"
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           fetch-depth: 0 # Fetch all history for all branches and tags.
 
@@ -116,14 +116,14 @@ jobs:
           python xarray/util/print_versions.py
       - name: Install mypy
         run: |
-          python -m pip install "mypy==1.15" --force-reinstall
+          python -m pip install "mypy==1.18.1" --force-reinstall
 
       - name: Run mypy
         run: |
           python -m mypy --install-types --non-interactive --cobertura-xml-report mypy_report
 
       - name: Upload mypy coverage to Codecov
-        uses: codecov/codecov-action@v5.4.0
+        uses: codecov/codecov-action@v5.5.1
         with:
           file: mypy_report/cobertura.xml
           flags: mypy
@@ -132,7 +132,7 @@ jobs:
           fail_ci_if_error: false
 
   mypy-min:
-    name: Mypy 3.10
+    name: Mypy 3.11
     runs-on: "ubuntu-latest"
     needs: detect-ci-trigger
     defaults:
@@ -140,10 +140,10 @@ jobs:
         shell: bash -l {0}
     env:
       CONDA_ENV_FILE: ci/requirements/environment.yml
-      PYTHON_VERSION: "3.10"
+      PYTHON_VERSION: "3.11"
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           fetch-depth: 0 # Fetch all history for all branches and tags.
 
@@ -167,14 +167,14 @@ jobs:
           python xarray/util/print_versions.py
       - name: Install mypy
         run: |
-          python -m pip install "mypy==1.15" --force-reinstall
+          python -m pip install "mypy==1.18.1" --force-reinstall
 
       - name: Run mypy
         run: |
           python -m mypy --install-types --non-interactive --cobertura-xml-report mypy_report
 
       - name: Upload mypy coverage to Codecov
-        uses: codecov/codecov-action@v5.4.0
+        uses: codecov/codecov-action@v5.5.1
         with:
           file: mypy_report/cobertura.xml
           flags: mypy-min
@@ -199,7 +199,7 @@ jobs:
       PYTHON_VERSION: "3.12"
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           fetch-depth: 0 # Fetch all history for all branches and tags.
 
@@ -230,7 +230,7 @@ jobs:
           python -m pyright xarray/
 
       - name: Upload pyright coverage to Codecov
-        uses: codecov/codecov-action@v5.4.0
+        uses: codecov/codecov-action@v5.5.1
         with:
           file: pyright_report/cobertura.xml
           flags: pyright
@@ -239,7 +239,7 @@ jobs:
           fail_ci_if_error: false
 
   pyright39:
-    name: Pyright 3.10
+    name: Pyright 3.11
     runs-on: "ubuntu-latest"
     needs: detect-ci-trigger
     if: |
@@ -252,10 +252,10 @@ jobs:
         shell: bash -l {0}
     env:
       CONDA_ENV_FILE: ci/requirements/environment.yml
-      PYTHON_VERSION: "3.10"
+      PYTHON_VERSION: "3.11"
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           fetch-depth: 0 # Fetch all history for all branches and tags.
 
@@ -286,7 +286,7 @@ jobs:
           python -m pyright xarray/
 
       - name: Upload pyright coverage to Codecov
-        uses: codecov/codecov-action@v5.4.0
+        uses: codecov/codecov-action@v5.5.1
         with:
           file: pyright_report/cobertura.xml
           flags: pyright39
@@ -307,27 +307,22 @@ jobs:
       COLUMNS: 120
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           fetch-depth: 0 # Fetch all history for all branches and tags.
 
-      - name: Setup micromamba
-        uses: mamba-org/setup-micromamba@v2
+      - uses: actions/setup-python@v6
         with:
-          environment-name: xarray-tests
-          create-args: >-
-            python=3.12
-            pyyaml
-            python-dateutil
-            cytoolz
-            rich
-            rich-click
-            py-rattler
+          python-version: "3.x"
 
       - name: All-deps minimum versions policy
-        run: |
-          python ci/minimum_versions.py ci/requirements/min-all-deps.yml
+        uses: xarray-contrib/minimum-dependency-versions@e2ac8ff0a76e8603d8536ef5d64743a375961ce9 # v0.1.1
+        with:
+          policy: ci/policy.yaml
+          environment-paths: ci/requirements/min-all-deps.yml
 
       - name: Bare minimum versions policy
-        run: |
-          python ci/minimum_versions.py ci/requirements/bare-minimum.yml
+        uses: xarray-contrib/minimum-dependency-versions@e2ac8ff0a76e8603d8536ef5d64743a375961ce9 # v0.1.1
+        with:
+          policy: ci/policy.yaml
+          environment-paths: ci/requirements/bare-minimum.yml
diff -pruN 2025.03.1-8/.github/workflows/ci.yaml 2025.10.1-1/.github/workflows/ci.yaml
--- 2025.03.1-8/.github/workflows/ci.yaml	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/.github/workflows/ci.yaml	2025-10-10 10:38:05.000000000 +0000
@@ -25,7 +25,7 @@ jobs:
     outputs:
       triggered: ${{ steps.detect-trigger.outputs.trigger-found }}
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           fetch-depth: 2
       - uses: xarray-contrib/ci-trigger@v1
@@ -37,8 +37,6 @@ jobs:
     runs-on: ${{ matrix.os }}
     needs: detect-ci-trigger
     if: needs.detect-ci-trigger.outputs.triggered == 'false'
-    env:
-      ZARR_V3_EXPERIMENTAL_API: 1
     defaults:
       run:
         shell: bash -l {0}
@@ -47,15 +45,18 @@ jobs:
       matrix:
         os: ["ubuntu-latest", "macos-latest", "windows-latest"]
         # Bookend python versions
-        python-version: ["3.10", "3.13"]
+        python-version: ["3.11", "3.13"]
         env: [""]
         include:
           # Minimum python version:
           - env: "bare-minimum"
-            python-version: "3.10"
+            python-version: "3.11"
+            os: ubuntu-latest
+          - env: "bare-min-and-scipy"
+            python-version: "3.11"
             os: ubuntu-latest
           - env: "min-all-deps"
-            python-version: "3.10"
+            python-version: "3.11"
             os: ubuntu-latest
           # Latest python version:
           - env: "all-but-numba"
@@ -70,7 +71,7 @@ jobs:
           # The mypy tests must be executed using only 1 process in order to guarantee
           # predictable mypy output messages for comparison to expectations.
           - env: "mypy"
-            python-version: "3.10"
+            python-version: "3.11"
             numprocesses: 1
             os: ubuntu-latest
           - env: "mypy"
@@ -78,7 +79,7 @@ jobs:
             numprocesses: 1
             os: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           fetch-depth: 0 # Fetch all history for all branches and tags.
       - name: Set environment variables
@@ -172,7 +173,7 @@ jobs:
           path: pytest.xml
 
       - name: Upload code coverage to Codecov
-        uses: codecov/codecov-action@v5.4.0
+        uses: codecov/codecov-action@v5.5.1
         env:
           CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
         with:
diff -pruN 2025.03.1-8/.github/workflows/hypothesis.yaml 2025.10.1-1/.github/workflows/hypothesis.yaml
--- 2025.03.1-8/.github/workflows/hypothesis.yaml	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/.github/workflows/hypothesis.yaml	2025-10-10 10:38:05.000000000 +0000
@@ -24,7 +24,7 @@ jobs:
     outputs:
       triggered: ${{ steps.detect-trigger.outputs.trigger-found }}
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           fetch-depth: 2
       - uses: xarray-contrib/ci-trigger@v1
@@ -52,7 +52,7 @@ jobs:
       PYTHON_VERSION: "3.12"
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           fetch-depth: 0 # Fetch all history for all branches and tags.
 
@@ -110,7 +110,7 @@ jobs:
           && steps.status.outcome == 'failure'
           && github.event_name == 'schedule'
           && github.repository_owner == 'pydata'
-        uses: xarray-contrib/issue-from-pytest-log@v1
+        uses: scientific-python/issue-from-pytest-log-action@v1
         with:
           log-path: output-${{ matrix.python-version }}-log.jsonl
           issue-title: "Nightly Hypothesis tests failed"
diff -pruN 2025.03.1-8/.github/workflows/label-prs.yml 2025.10.1-1/.github/workflows/label-prs.yml
--- 2025.03.1-8/.github/workflows/label-prs.yml	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/.github/workflows/label-prs.yml	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,12 @@
+name: "PR Labeler"
+on:
+  - pull_request_target
+
+jobs:
+  label:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/labeler@v6
+        with:
+          repo-token: "${{ secrets.GITHUB_TOKEN }}"
+          sync-labels: false
diff -pruN 2025.03.1-8/.github/workflows/nightly-wheels.yml 2025.10.1-1/.github/workflows/nightly-wheels.yml
--- 2025.03.1-8/.github/workflows/nightly-wheels.yml	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/.github/workflows/nightly-wheels.yml	2025-10-10 10:38:05.000000000 +0000
@@ -8,10 +8,10 @@ jobs:
     runs-on: ubuntu-latest
     if: github.repository == 'pydata/xarray'
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           fetch-depth: 0
-      - uses: actions/setup-python@v5
+      - uses: actions/setup-python@v6
         with:
           python-version: "3.12"
 
@@ -38,7 +38,7 @@ jobs:
           fi
 
       - name: Upload wheel
-        uses: scientific-python/upload-nightly-action@82396a2ed4269ba06c6b2988bb4fd568ef3c3d6b # 0.6.1
+        uses: scientific-python/upload-nightly-action@b36e8c0c10dbcfd2e05bf95f17ef8c14fd708dbf # 0.6.2
         with:
           anaconda_nightly_upload_token: ${{ secrets.ANACONDA_NIGHTLY }}
           artifacts_path: dist
diff -pruN 2025.03.1-8/.github/workflows/pypi-release.yaml 2025.10.1-1/.github/workflows/pypi-release.yaml
--- 2025.03.1-8/.github/workflows/pypi-release.yaml	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/.github/workflows/pypi-release.yaml	2025-10-10 10:38:05.000000000 +0000
@@ -6,16 +6,24 @@ on:
   push:
     tags:
       - "v*"
+  pull_request:
+    types: [opened, reopened, synchronize, labeled]
+  workflow_dispatch:
 
 jobs:
   build-artifacts:
     runs-on: ubuntu-latest
-    if: github.repository == 'pydata/xarray'
+    if: ${{ github.repository == 'pydata/xarray' && (
+      (contains(github.event.pull_request.labels.*.name, 'Release') && github.event_name == 'pull_request') ||
+      github.event_name == 'release' ||
+      github.event_name == 'workflow_dispatch' ||
+      startsWith(github.ref, 'refs/tags/v')
+      ) }}
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           fetch-depth: 0
-      - uses: actions/setup-python@v5
+      - uses: actions/setup-python@v6
         name: Install Python
         with:
           python-version: "3.12"
@@ -50,11 +58,11 @@ jobs:
     needs: build-artifacts
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/setup-python@v5
+      - uses: actions/setup-python@v6
         name: Install Python
         with:
           python-version: "3.12"
-      - uses: actions/download-artifact@v4
+      - uses: actions/download-artifact@v5
         with:
           name: releases
           path: dist
@@ -64,7 +72,6 @@ jobs:
           ls -ltrh dist
 
       - name: Verify the built dist/wheel is valid
-        if: github.event_name == 'push'
         run: |
           python -m pip install --upgrade pip
           python -m pip install dist/xarray*.whl
@@ -82,13 +89,13 @@ jobs:
       id-token: write
 
     steps:
-      - uses: actions/download-artifact@v4
+      - uses: actions/download-artifact@v5
         with:
           name: releases
           path: dist
       - name: Publish package to TestPyPI
         if: github.event_name == 'push'
-        uses: pypa/gh-action-pypi-publish@v1.12.4
+        uses: pypa/gh-action-pypi-publish@v1.13.0
         with:
           repository_url: https://test.pypi.org/legacy/
           verbose: true
@@ -105,11 +112,11 @@ jobs:
       id-token: write
 
     steps:
-      - uses: actions/download-artifact@v4
+      - uses: actions/download-artifact@v5
         with:
           name: releases
           path: dist
       - name: Publish package to PyPI
-        uses: pypa/gh-action-pypi-publish@v1.12.4
+        uses: pypa/gh-action-pypi-publish@v1.13.0
         with:
           verbose: true
diff -pruN 2025.03.1-8/.github/workflows/upstream-dev-ci.yaml 2025.10.1-1/.github/workflows/upstream-dev-ci.yaml
--- 2025.03.1-8/.github/workflows/upstream-dev-ci.yaml	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/.github/workflows/upstream-dev-ci.yaml	2025-10-10 10:38:05.000000000 +0000
@@ -28,7 +28,7 @@ jobs:
     outputs:
       triggered: ${{ steps.detect-trigger.outputs.trigger-found }}
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           fetch-depth: 2
       - uses: xarray-contrib/ci-trigger@v1
@@ -40,8 +40,6 @@ jobs:
     name: upstream-dev
     runs-on: ubuntu-latest
     needs: detect-ci-trigger
-    env:
-      ZARR_V3_EXPERIMENTAL_API: 1
     if: |
       always()
       && (
@@ -57,7 +55,7 @@ jobs:
       matrix:
         python-version: ["3.12"]
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           fetch-depth: 0 # Fetch all history for all branches and tags.
       - name: Set up conda environment
@@ -92,7 +90,7 @@ jobs:
           && steps.status.outcome == 'failure'
           && github.event_name == 'schedule'
           && github.repository_owner == 'pydata'
-        uses: xarray-contrib/issue-from-pytest-log@v1
+        uses: scientific-python/issue-from-pytest-log-action@v1
         with:
           log-path: output-${{ matrix.python-version }}-log.jsonl
 
@@ -113,7 +111,7 @@ jobs:
       matrix:
         python-version: ["3.11"]
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
         with:
           fetch-depth: 0 # Fetch all history for all branches and tags.
       - name: Set up conda environment
@@ -140,7 +138,7 @@ jobs:
         run: |
           python -m mypy --install-types --non-interactive --cobertura-xml-report mypy_report
       - name: Upload mypy coverage to Codecov
-        uses: codecov/codecov-action@v5.4.0
+        uses: codecov/codecov-action@v5.5.1
         with:
           file: mypy_report/cobertura.xml
           flags: mypy
diff -pruN 2025.03.1-8/.gitignore 2025.10.1-1/.gitignore
--- 2025.03.1-8/.gitignore	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/.gitignore	2025-10-10 10:38:05.000000000 +0000
@@ -10,7 +10,6 @@ __pycache__
 doc/*.nc
 doc/auto_gallery
 doc/rasm.zarr
-doc/savefig
 
 # C extensions
 *.so
@@ -71,8 +70,12 @@ asv_bench/pkgs
 # xarray specific
 doc/_build
 doc/generated/
+doc/api/generated/
 xarray/tests/data/*.grib.*.idx
 
+# Claude Code
+.claude/
+
 # Sync tools
 Icon*
 
@@ -86,3 +89,5 @@ doc/videos-gallery.txt
 # gitignore to make it _easier_ to work with `uv`, not as an indication that I
 # think we shouldn't...)
 uv.lock
+mypy_report/
+xarray-docs/
diff -pruN 2025.03.1-8/.pre-commit-config.yaml 2025.10.1-1/.pre-commit-config.yaml
--- 2025.03.1-8/.pre-commit-config.yaml	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/.pre-commit-config.yaml	2025-10-10 10:38:05.000000000 +0000
@@ -4,7 +4,7 @@ ci:
   autoupdate_commit_msg: "Update pre-commit hooks"
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v5.0.0
+    rev: v6.0.0
     hooks:
       - id: trailing-whitespace
       - id: end-of-file-fixer
@@ -24,25 +24,26 @@ repos:
       - id: rst-inline-touching-normal
       - id: text-unicode-replacement-char
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    # Ruff version.
-    rev: v0.9.9
+    rev: v0.13.3
     hooks:
-      - id: ruff-format
-      - id: ruff
+      - id: ruff-check
         args: ["--fix", "--show-fixes"]
-  - repo: https://github.com/keewis/blackdoc
-    rev: v0.3.9
-    hooks:
-      - id: blackdoc
-        exclude: "generate_aggregations.py"
-        additional_dependencies: ["black==24.8.0"]
+      - id: ruff-format
+  # Disabled: blackdoc v0.4.3 has compatibility issues with Python 3.13
+  # Re-enable when blackdoc is updated to support Python 3.13
+  # - repo: https://github.com/keewis/blackdoc
+  #   rev: v0.4.3
+  #   hooks:
+  #     - id: blackdoc
+  #       exclude: "generate_aggregations.py"
+  #       additional_dependencies: ["black==24.8.0"]
   - repo: https://github.com/rbubley/mirrors-prettier
-    rev: v3.5.3
+    rev: v3.6.2
     hooks:
       - id: prettier
         args: [--cache-location=.prettier_cache/cache]
   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v1.15.0
+    rev: v1.18.2
     hooks:
       - id: mypy
         # Copied from setup.cfg
@@ -69,13 +70,11 @@ repos:
       - id: taplo-format
         args: ["--option", "array_auto_collapse=false"]
   - repo: https://github.com/abravalheri/validate-pyproject
-    rev: v0.23
+    rev: v0.24.1
     hooks:
       - id: validate-pyproject
         additional_dependencies: ["validate-pyproject-schema-store[all]"]
-  - repo: https://github.com/crate-ci/typos
-    rev: dictgen-v0.3.1
+  - repo: https://github.com/adhtruong/mirrors-typos
+    rev: v1.37.2
     hooks:
       - id: typos
-        # https://github.com/crate-ci/typos/issues/347
-        pass_filenames: false
diff -pruN 2025.03.1-8/.readthedocs.yaml 2025.10.1-1/.readthedocs.yaml
--- 2025.03.1-8/.readthedocs.yaml	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/.readthedocs.yaml	2025-10-10 10:38:05.000000000 +0000
@@ -18,4 +18,5 @@ build:
 conda:
   environment: ci/requirements/doc.yml
 
-formats: []
+formats:
+  - htmlzip
diff -pruN 2025.03.1-8/CLAUDE.md 2025.10.1-1/CLAUDE.md
--- 2025.03.1-8/CLAUDE.md	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/CLAUDE.md	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,32 @@
+# xarray development setup
+
+## Setup
+
+```bash
+uv sync
+```
+
+## Run tests
+
+```bash
+uv run pytest xarray -n auto  # All tests in parallel
+uv run pytest xarray/tests/test_dataarray.py  # Specific file
+```
+
+## Linting & type checking
+
+```bash
+pre-commit run --all-files  # Includes ruff and other checks
+uv run dmypy run  # Type checking with mypy
+```
+
+## GitHub Interaction Guidelines
+
+- **NEVER impersonate the user on GitHub**, always sign off with something like
+  "[This is Claude Code on behalf of Jane Doe]"
+- Never create issues nor pull requests on the xarray GitHub repository unless
+  explicitly instructed
+- Never post "update" messages, progress reports, or explanatory comments on
+  GitHub issues/PRs unless specifically instructed
+- When creating commits, always include a co-authorship trailer:
+  `Co-authored-by: Claude <noreply@anthropic.com>`
diff -pruN 2025.03.1-8/CODE_OF_CONDUCT.md 2025.10.1-1/CODE_OF_CONDUCT.md
--- 2025.03.1-8/CODE_OF_CONDUCT.md	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/CODE_OF_CONDUCT.md	2025-10-10 10:38:05.000000000 +0000
@@ -1,46 +1,23 @@
-# Contributor Covenant Code of Conduct
+# NUMFOCUS CODE OF CONDUCT
 
-## Our Pledge
+You can find the full Code of Conduct on the NumFOCUS website: https://numfocus.org/code-of-conduct
 
-In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+## THE SHORT VERSION
 
-## Our Standards
+NumFOCUS is dedicated to providing a harassment-free community for everyone, regardless of gender, sexual orientation, gender identity and expression, disability, physical appearance, body size, race, or religion. We do not tolerate harassment of community members in any form.
 
-Examples of behavior that contributes to creating a positive environment include:
+Be kind to others. Do not insult or put down others. Behave professionally. Remember that harassment and sexist, racist, or exclusionary jokes are not appropriate for NumFOCUS.
 
-- Using welcoming and inclusive language
-- Being respectful of differing viewpoints and experiences
-- Gracefully accepting constructive criticism
-- Focusing on what is best for the community
-- Showing empathy towards other community members
+All communication should be appropriate for a professional audience including people of many different backgrounds. Sexual language and imagery is not appropriate.
 
-Examples of unacceptable behavior by participants include:
+Thank you for helping make this a welcoming, friendly community for all.
 
-- The use of sexualized language or imagery and unwelcome sexual attention or advances
-- Trolling, insulting/derogatory comments, and personal or political attacks
-- Public or private harassment
-- Publishing others' private information, such as a physical or electronic address, without explicit permission
-- Other conduct which could reasonably be considered inappropriate in a professional setting
+## HOW TO REPORT
 
-## Our Responsibilities
+If you feel that the Code of Conduct has been violated, feel free to submit a report, by using the form: [NumFOCUS Code of Conduct Reporting Form](https://numfocus.typeform.com/to/ynjGdT?typeform-source=numfocus.org)
 
-Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+## WHO WILL RECEIVE YOUR REPORT
 
-Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+Your report will be received and handled by NumFOCUS Code of Conduct Working Group; trained, and experienced contributors with diverse backgrounds. The group is making decisions independently from the project, PyData, NumFOCUS or any other organization.
 
-## Scope
-
-This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
-
-## Enforcement
-
-Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at xarray-core-team@googlegroups.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
-
-Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
-
-## Attribution
-
-This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [https://contributor-covenant.org/version/1/4][version]
-
-[homepage]: https://contributor-covenant.org
-[version]: https://contributor-covenant.org/version/1/4/
+You can learn more about the current group members, as well as the reporting procedure here: https://numfocus.org/code-of-conduct
diff -pruN 2025.03.1-8/CONTRIBUTING.md 2025.10.1-1/CONTRIBUTING.md
--- 2025.03.1-8/CONTRIBUTING.md	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/CONTRIBUTING.md	2025-10-10 10:38:05.000000000 +0000
@@ -1 +1 @@
-Xarray's contributor guidelines [can be found in our online documentation](https://docs.xarray.dev/en/stable/contributing.html)
+Xarray's contributor guidelines [can be found in our online documentation](https://docs.xarray.dev/en/stable/contribute/contributing.html)
diff -pruN 2025.03.1-8/CORE_TEAM_GUIDE.md 2025.10.1-1/CORE_TEAM_GUIDE.md
--- 2025.03.1-8/CORE_TEAM_GUIDE.md	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/CORE_TEAM_GUIDE.md	2025-10-10 10:38:05.000000000 +0000
@@ -272,7 +272,7 @@ resources such as:
 - [`pre-commit`](https://pre-commit.com) hooks for autoformatting.
 - [`ruff`](https://github.com/astral-sh/ruff) autoformatting and linting.
 - [python-xarray](https://stackoverflow.com/questions/tagged/python-xarray) on Stack Overflow.
-- [@xarray_dev](https://twitter.com/xarray_dev) on Twitter.
+- [@xarray_dev](https://x.com/xarray_dev) on X.
 - [xarray-dev](https://discord.gg/bsSGdwBn) discord community (normally only used for remote synchronous chat during sprints).
 
 You are not required to monitor any of the social resources.
diff -pruN 2025.03.1-8/HOW_TO_RELEASE.md 2025.10.1-1/HOW_TO_RELEASE.md
--- 2025.03.1-8/HOW_TO_RELEASE.md	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/HOW_TO_RELEASE.md	2025-10-10 10:38:05.000000000 +0000
@@ -48,8 +48,11 @@ upstream        https://github.com/pydat
       release.
 5.  Open a PR with the release summary and whatsnew changes; in particular the
     release headline should get feedback from the team on what's important to include.
+    Apply the `Release` label to the PR to trigger a test build action.
+
 6.  After merging, again ensure your main branch is synced to upstream:
     ```sh
+    git switch main
     git pull upstream main
     ```
 7.  If you have any doubts, run the full test suite one final time!
@@ -96,28 +99,27 @@ upstream        https://github.com/pydat
 
     ```
 
-12. Commit your changes and push to main again:
+12. Make a PR with these changes and merge it:
 
     ```sh
-    git commit -am 'New whatsnew section'
-    git push upstream main
+    git checkout -b empty-whatsnew-YYYY.MM.X+1
+    git commit -am "empty whatsnew"
+    git push
     ```
 
-    You're done pushing to main!
-
-13. Update the version available on pyodide:
+    (Note that repo branch restrictions prevent pushing to `main`, so you have to just-self-merge this.)
 
+13. Consider updating the version available on pyodide:
     - Open the PyPI page for [Xarray downloads](https://pypi.org/project/xarray/#files)
-    - Edit [`pyodide/packages/xarray/meta.yaml`](https://github.com/pyodide/pyodide/blob/main/packages/xarray/meta.yaml) to update the
+    - Edit [`packages/xarray/meta.yaml`](https://github.com/pyodide/pyodide-recipes/blob/main/packages/xarray/meta.yaml) to update the
       - version number
       - link to the wheel (under "Built Distribution" on the PyPI page)
       - SHA256 hash (Click "Show Hashes" next to the link to the wheel)
-    - Open a pull request to pyodide
+    - Open a pull request to pyodide-recipes
 
-14. Issue the release announcement to mailing lists & Twitter. For bug fix releases, I
+14. Issue the release announcement to mailing lists & Twitter (X). For bug fix releases, I
     usually only email xarray@googlegroups.com. For major/feature releases, I will email a broader
     list (no more than once every 3-6 months):
-
     - pydata@googlegroups.com
     - xarray@googlegroups.com
     - numpy-discussion@scipy.org
diff -pruN 2025.03.1-8/README.md 2025.10.1-1/README.md
--- 2025.03.1-8/README.md	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/README.md	2025-10-10 10:38:05.000000000 +0000
@@ -11,7 +11,8 @@
 [![Conda - Downloads](https://img.shields.io/conda/dn/anaconda/xarray?label=conda%7Cdownloads)](https://anaconda.org/anaconda/xarray)
 [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.11183201.svg)](https://doi.org/10.5281/zenodo.11183201)
 [![Examples on binder](https://img.shields.io/badge/launch-binder-579ACA.svg?logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAFkAAABZCAMAAABi1XidAAAB8lBMVEX///9XmsrmZYH1olJXmsr1olJXmsrmZYH1olJXmsr1olJXmsrmZYH1olL1olJXmsr1olJXmsrmZYH1olL1olJXmsrmZYH1olJXmsr1olL1olJXmsrmZYH1olL1olJXmsrmZYH1olL1olL0nFf1olJXmsrmZYH1olJXmsq8dZb1olJXmsrmZYH1olJXmspXmspXmsr1olL1olJXmsrmZYH1olJXmsr1olL1olJXmsrmZYH1olL1olLeaIVXmsrmZYH1olL1olL1olJXmsrmZYH1olLna31Xmsr1olJXmsr1olJXmsrmZYH1olLqoVr1olJXmsr1olJXmsrmZYH1olL1olKkfaPobXvviGabgadXmsqThKuofKHmZ4Dobnr1olJXmsr1olJXmspXmsr1olJXmsrfZ4TuhWn1olL1olJXmsqBi7X1olJXmspZmslbmMhbmsdemsVfl8ZgmsNim8Jpk8F0m7R4m7F5nLB6jbh7jbiDirOEibOGnKaMhq+PnaCVg6qWg6qegKaff6WhnpKofKGtnomxeZy3noG6dZi+n3vCcpPDcpPGn3bLb4/Mb47UbIrVa4rYoGjdaIbeaIXhoWHmZYHobXvpcHjqdHXreHLroVrsfG/uhGnuh2bwj2Hxk17yl1vzmljzm1j0nlX1olL3AJXWAAAAbXRSTlMAEBAQHx8gICAuLjAwMDw9PUBAQEpQUFBXV1hgYGBkcHBwcXl8gICAgoiIkJCQlJicnJ2goKCmqK+wsLC4usDAwMjP0NDQ1NbW3Nzg4ODi5+3v8PDw8/T09PX29vb39/f5+fr7+/z8/Pz9/v7+zczCxgAABC5JREFUeAHN1ul3k0UUBvCb1CTVpmpaitAGSLSpSuKCLWpbTKNJFGlcSMAFF63iUmRccNG6gLbuxkXU66JAUef/9LSpmXnyLr3T5AO/rzl5zj137p136BISy44fKJXuGN/d19PUfYeO67Znqtf2KH33Id1psXoFdW30sPZ1sMvs2D060AHqws4FHeJojLZqnw53cmfvg+XR8mC0OEjuxrXEkX5ydeVJLVIlV0e10PXk5k7dYeHu7Cj1j+49uKg7uLU61tGLw1lq27ugQYlclHC4bgv7VQ+TAyj5Zc/UjsPvs1sd5cWryWObtvWT2EPa4rtnWW3JkpjggEpbOsPr7F7EyNewtpBIslA7p43HCsnwooXTEc3UmPmCNn5lrqTJxy6nRmcavGZVt/3Da2pD5NHvsOHJCrdc1G2r3DITpU7yic7w/7Rxnjc0kt5GC4djiv2Sz3Fb2iEZg41/ddsFDoyuYrIkmFehz0HR2thPgQqMyQYb2OtB0WxsZ3BeG3+wpRb1vzl2UYBog8FfGhttFKjtAclnZYrRo9ryG9uG/FZQU4AEg8ZE9LjGMzTmqKXPLnlWVnIlQQTvxJf8ip7VgjZjyVPrjw1te5otM7RmP7xm+sK2Gv9I8Gi++BRbEkR9EBw8zRUcKxwp73xkaLiqQb+kGduJTNHG72zcW9LoJgqQxpP3/Tj//c3yB0tqzaml05/+orHLksVO+95kX7/7qgJvnjlrfr2Ggsyx0eoy9uPzN5SPd86aXggOsEKW2Prz7du3VID3/tzs/sSRs2w7ovVHKtjrX2pd7ZMlTxAYfBAL9jiDwfLkq55Tm7ifhMlTGPyCAs7RFRhn47JnlcB9RM5T97ASuZXIcVNuUDIndpDbdsfrqsOppeXl5Y+XVKdjFCTh+zGaVuj0d9zy05PPK3QzBamxdwtTCrzyg/2Rvf2EstUjordGwa/kx9mSJLr8mLLtCW8HHGJc2R5hS219IiF6PnTusOqcMl57gm0Z8kanKMAQg0qSyuZfn7zItsbGyO9QlnxY0eCuD1XL2ys/MsrQhltE7Ug0uFOzufJFE2PxBo/YAx8XPPdDwWN0MrDRYIZF0mSMKCNHgaIVFoBbNoLJ7tEQDKxGF0kcLQimojCZopv0OkNOyWCCg9XMVAi7ARJzQdM2QUh0gmBozjc3Skg6dSBRqDGYSUOu66Zg+I2fNZs/M3/f/Grl/XnyF1Gw3VKCez0PN5IUfFLqvgUN4C0qNqYs5YhPL+aVZYDE4IpUk57oSFnJm4FyCqqOE0jhY2SMyLFoo56zyo6becOS5UVDdj7Vih0zp+tcMhwRpBeLyqtIjlJKAIZSbI8SGSF3k0pA3mR5tHuwPFoa7N7reoq2bqCsAk1HqCu5uvI1n6JuRXI+S1Mco54YmYTwcn6Aeic+kssXi8XpXC4V3t7/ADuTNKaQJdScAAAAAElFTkSuQmCC)](https://mybinder.org/v2/gh/pydata/xarray/main?urlpath=lab/tree/doc/examples/weather-data.ipynb)
-[![Twitter](https://img.shields.io/twitter/follow/xarray_dev?style=social)](https://twitter.com/xarray_dev)
+[![Twitter](https://img.shields.io/twitter/follow/xarray_dev?style=social)](https://x.com/xarray_dev)
+[![image](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/pydata/xarray/refs/heads/main/doc/badge.json)](https://xarray.dev)
 
 **xarray** (pronounced "ex-array", formerly known as **xray**) is an open source project and Python
 package that makes working with labelled multi-dimensional arrays
diff -pruN 2025.03.1-8/asv_bench/asv.conf.json 2025.10.1-1/asv_bench/asv.conf.json
--- 2025.03.1-8/asv_bench/asv.conf.json	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/asv_bench/asv.conf.json	2025-10-10 10:38:05.000000000 +0000
@@ -60,7 +60,7 @@
   // },
   "matrix": {
     "setuptools_scm": [""], // GH6609
-    "numpy": [""],
+    "numpy": ["2.2"],
     "pandas": [""],
     "netcdf4": [""],
     "scipy": [""],
diff -pruN 2025.03.1-8/asv_bench/benchmarks/README_CI.md 2025.10.1-1/asv_bench/benchmarks/README_CI.md
--- 2025.03.1-8/asv_bench/benchmarks/README_CI.md	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/asv_bench/benchmarks/README_CI.md	2025-10-10 10:38:05.000000000 +0000
@@ -115,8 +115,10 @@ To minimize the time required to run the
 ```python
 from . import _skip_slow  # this function is defined in benchmarks.__init__
 
+
 def time_something_slow():
     pass
 
+
 time_something.setup = _skip_slow
 ```
diff -pruN 2025.03.1-8/asv_bench/benchmarks/dataset_io.py 2025.10.1-1/asv_bench/benchmarks/dataset_io.py
--- 2025.03.1-8/asv_bench/benchmarks/dataset_io.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/asv_bench/benchmarks/dataset_io.py	2025-10-10 10:38:05.000000000 +0000
@@ -678,13 +678,7 @@ class IOReadCustomEngine:
                 lock: xr.backends.locks.SerializableLock | None = None,
                 autoclose: bool = False,
             ):
-                if lock is None:
-                    if mode == "r":
-                        locker = xr.backends.locks.SerializableLock()
-                    else:
-                        locker = xr.backends.locks.SerializableLock()
-                else:
-                    locker = lock
+                locker = lock or xr.backends.locks.SerializableLock()
 
                 manager = xr.backends.CachingFileManager(
                     xr.backends.DummyFileManager,
diff -pruN 2025.03.1-8/asv_bench/benchmarks/indexing.py 2025.10.1-1/asv_bench/benchmarks/indexing.py
--- 2025.03.1-8/asv_bench/benchmarks/indexing.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/asv_bench/benchmarks/indexing.py	2025-10-10 10:38:05.000000000 +0000
@@ -39,18 +39,30 @@ outer_assignment_values = {
     "2d-1scalar": xr.DataArray(randn(100, frac_nan=0.1), dims=["x"]),
 }
 
-vectorized_indexes = {
-    "1-1d": {"x": xr.DataArray(randint(0, nx, 400), dims="a")},
-    "2-1d": {
-        "x": xr.DataArray(randint(0, nx, 400), dims="a"),
-        "y": xr.DataArray(randint(0, ny, 400), dims="a"),
-    },
-    "3-2d": {
-        "x": xr.DataArray(randint(0, nx, 400).reshape(4, 100), dims=["a", "b"]),
-        "y": xr.DataArray(randint(0, ny, 400).reshape(4, 100), dims=["a", "b"]),
-        "t": xr.DataArray(randint(0, nt, 400).reshape(4, 100), dims=["a", "b"]),
-    },
-}
+
+def make_vectorized_indexes(n_index):
+    return {
+        "1-1d": {"x": xr.DataArray(randint(0, nx, n_index), dims="a")},
+        "2-1d": {
+            "x": xr.DataArray(randint(0, nx, n_index), dims="a"),
+            "y": xr.DataArray(randint(0, ny, n_index), dims="a"),
+        },
+        "3-2d": {
+            "x": xr.DataArray(
+                randint(0, nx, n_index).reshape(n_index // 100, 100), dims=["a", "b"]
+            ),
+            "y": xr.DataArray(
+                randint(0, ny, n_index).reshape(n_index // 100, 100), dims=["a", "b"]
+            ),
+            "t": xr.DataArray(
+                randint(0, nt, n_index).reshape(n_index // 100, 100), dims=["a", "b"]
+            ),
+        },
+    }
+
+
+vectorized_indexes = make_vectorized_indexes(400)
+big_vectorized_indexes = make_vectorized_indexes(400_000)
 
 vectorized_assignment_values = {
     "1-1d": xr.DataArray(randn((400, ny)), dims=["a", "y"], coords={"a": randn(400)}),
@@ -101,6 +113,20 @@ class Indexing(Base):
         self.ds_large.isel(**basic_indexes[key]).load()
 
 
+class IndexingOnly(Base):
+    @parameterized(["key"], [list(basic_indexes.keys())])
+    def time_indexing_basic(self, key):
+        self.ds.isel(**basic_indexes[key])
+
+    @parameterized(["key"], [list(outer_indexes.keys())])
+    def time_indexing_outer(self, key):
+        self.ds.isel(**outer_indexes[key])
+
+    @parameterized(["key"], [list(big_vectorized_indexes.keys())])
+    def time_indexing_big_vectorized(self, key):
+        self.ds.isel(**big_vectorized_indexes[key])
+
+
 class Assignment(Base):
     @parameterized(["key"], [list(basic_indexes.keys())])
     def time_assignment_basic(self, key):
diff -pruN 2025.03.1-8/asv_bench/benchmarks/interp.py 2025.10.1-1/asv_bench/benchmarks/interp.py
--- 2025.03.1-8/asv_bench/benchmarks/interp.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/asv_bench/benchmarks/interp.py	2025-10-10 10:38:05.000000000 +0000
@@ -25,23 +25,37 @@ class Interpolation:
                 "var1": (("x", "y"), randn_xy),
                 "var2": (("x", "t"), randn_xt),
                 "var3": (("t",), randn_t),
+                "var4": (("z",), np.array(["text"])),
+                "var5": (("k",), np.array(["a", "b", "c"])),
             },
             coords={
                 "x": np.arange(nx),
                 "y": np.linspace(0, 1, ny),
                 "t": pd.date_range("1970-01-01", periods=nt, freq="D"),
                 "x_coords": ("x", np.linspace(1.1, 2.1, nx)),
+                "z": np.array([1]),
+                "k": np.linspace(0, nx, 3),
             },
         )
 
     @parameterized(["method", "is_short"], (["linear", "cubic"], [True, False]))
-    def time_interpolation(self, method, is_short):
+    def time_interpolation_numeric_1d(self, method, is_short):
         new_x = new_x_short if is_short else new_x_long
-        self.ds.interp(x=new_x, method=method).load()
+        self.ds.interp(x=new_x, method=method).compute()
 
     @parameterized(["method"], (["linear", "nearest"]))
-    def time_interpolation_2d(self, method):
-        self.ds.interp(x=new_x_long, y=new_y_long, method=method).load()
+    def time_interpolation_numeric_2d(self, method):
+        self.ds.interp(x=new_x_long, y=new_y_long, method=method).compute()
+
+    @parameterized(["is_short"], ([True, False]))
+    def time_interpolation_string_scalar(self, is_short):
+        new_z = new_x_short if is_short else new_x_long
+        self.ds.interp(z=new_z).compute()
+
+    @parameterized(["is_short"], ([True, False]))
+    def time_interpolation_string_1d(self, is_short):
+        new_k = new_x_short if is_short else new_x_long
+        self.ds.interp(k=new_k).compute()
 
 
 class InterpolationDask(Interpolation):
diff -pruN 2025.03.1-8/asv_bench/benchmarks/repr.py 2025.10.1-1/asv_bench/benchmarks/repr.py
--- 2025.03.1-8/asv_bench/benchmarks/repr.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/asv_bench/benchmarks/repr.py	2025-10-10 10:38:05.000000000 +0000
@@ -25,6 +25,25 @@ class Repr:
         self.ds._repr_html_()
 
 
+class ReprDataTree:
+    def setup(self):
+        # construct a datatree with 500 nodes
+        number_of_files = 20
+        number_of_groups = 25
+        tree_dict = {}
+        for f in range(number_of_files):
+            for g in range(number_of_groups):
+                tree_dict[f"file_{f}/group_{g}"] = xr.Dataset({"g": f * g})
+
+        self.dt = xr.DataTree.from_dict(tree_dict)
+
+    def time_repr(self):
+        repr(self.dt)
+
+    def time_repr_html(self):
+        self.dt._repr_html_()
+
+
 class ReprMultiIndex:
     def setup(self):
         index = pd.MultiIndex.from_product(
@@ -38,3 +57,31 @@ class ReprMultiIndex:
 
     def time_repr_html(self):
         self.da._repr_html_()
+
+
+class ReprPandasRangeIndex:
+    # display a memory-saving pandas.RangeIndex shouldn't trigger memory
+    # expensive conversion into a numpy array
+    def setup(self):
+        index = xr.indexes.PandasIndex(pd.RangeIndex(1_000_000), "x")
+        self.ds = xr.Dataset(coords=xr.Coordinates.from_xindex(index))
+
+    def time_repr(self):
+        repr(self.ds.x)
+
+    def time_repr_html(self):
+        self.ds.x._repr_html_()
+
+
+class ReprXarrayRangeIndex:
+    # display an Xarray RangeIndex shouldn't trigger memory expensive conversion
+    # of its lazy coordinate into a numpy array
+    def setup(self):
+        index = xr.indexes.RangeIndex.arange(1_000_000, dim="x")
+        self.ds = xr.Dataset(coords=xr.Coordinates.from_xindex(index))
+
+    def time_repr(self):
+        repr(self.ds.x)
+
+    def time_repr_html(self):
+        self.ds.x._repr_html_()
diff -pruN 2025.03.1-8/ci/install-upstream-wheels.sh 2025.10.1-1/ci/install-upstream-wheels.sh
--- 2025.03.1-8/ci/install-upstream-wheels.sh	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/ci/install-upstream-wheels.sh	2025-10-10 10:38:05.000000000 +0000
@@ -35,15 +35,7 @@ python -m pip install \
     numpy \
     scipy \
     matplotlib \
-    pandas
-# for some reason pandas depends on pyarrow already.
-# Remove once a `pyarrow` version compiled with `numpy>=2.0` is on `conda-forge`
-python -m pip install \
-    -i https://pypi.fury.io/arrow-nightlies/ \
-    --prefer-binary \
-    --no-deps \
-    --pre \
-    --upgrade \
+    pandas \
     pyarrow
 # manually install `pint`, `donfig`, and `crc32c` to pull in new dependencies
 python -m pip install --upgrade pint donfig crc32c
diff -pruN 2025.03.1-8/ci/minimum_versions.py 2025.10.1-1/ci/minimum_versions.py
--- 2025.03.1-8/ci/minimum_versions.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/ci/minimum_versions.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,324 +0,0 @@
-import asyncio
-import bisect
-import datetime
-import pathlib
-import sys
-from dataclasses import dataclass, field
-
-import rich_click as click
-import yaml
-from dateutil.relativedelta import relativedelta
-from rattler import Gateway, Version
-from rich.console import Console
-from rich.panel import Panel
-from rich.style import Style
-from rich.table import Column, Table
-from tlz.functoolz import curry, pipe
-from tlz.itertoolz import concat, groupby
-
-click.rich_click.SHOW_ARGUMENTS = True
-
-channels = ["conda-forge"]
-platforms = ["noarch", "linux-64"]
-ignored_packages = [
-    "coveralls",
-    "pip",
-    "pytest",
-    "pytest-cov",
-    "pytest-env",
-    "pytest-mypy-plugins",
-    "pytest-timeout",
-    "pytest-xdist",
-    "hypothesis",
-]
-
-
-@dataclass
-class Policy:
-    package_months: dict
-    default_months: int
-    overrides: dict[str, Version] = field(default_factory=dict)
-
-    def minimum_version(self, package_name, releases):
-        if (override := self.overrides.get(package_name)) is not None:
-            return override
-
-        policy_months = self.package_months.get(package_name, self.default_months)
-        today = datetime.date.today()
-
-        cutoff_date = today - relativedelta(months=policy_months)
-
-        index = bisect.bisect_left(
-            releases, cutoff_date, key=lambda x: x.timestamp.date()
-        )
-        return releases[index - 1 if index > 0 else 0]
-
-
-@dataclass
-class Spec:
-    name: str
-    version: Version | None
-
-    @classmethod
-    def parse(cls, spec_text):
-        warnings = []
-        if ">" in spec_text or "<" in spec_text:
-            warnings.append(
-                f"package should be pinned with an exact version: {spec_text!r}"
-            )
-
-            spec_text = spec_text.replace(">", "").replace("<", "")
-
-        if "=" in spec_text:
-            name, version_text = spec_text.split("=", maxsplit=1)
-            version = Version(version_text)
-            segments = version.segments()
-
-            if len(segments) != 2 or (len(segments) == 3 and segments[2] != 0):
-                warnings.append(
-                    f"package should be pinned to a minor version (got {version})"
-                )
-        else:
-            name = spec_text
-            version = None
-
-        return cls(name, version), (name, warnings)
-
-
-@dataclass(order=True)
-class Release:
-    version: Version
-    build_number: int
-    timestamp: datetime.datetime = field(compare=False)
-
-    @classmethod
-    def from_repodata_record(cls, repo_data):
-        return cls(
-            version=repo_data.version,
-            build_number=repo_data.build_number,
-            timestamp=repo_data.timestamp,
-        )
-
-
-def parse_environment(text):
-    env = yaml.safe_load(text)
-
-    specs = []
-    warnings = []
-    for dep in env["dependencies"]:
-        spec, warnings_ = Spec.parse(dep)
-
-        warnings.append(warnings_)
-        specs.append(spec)
-
-    return specs, warnings
-
-
-def is_preview(version):
-    candidates = ["rc", "beta", "alpha"]
-
-    *_, last_segment = version.segments()
-    return any(candidate in last_segment for candidate in candidates)
-
-
-def group_packages(records):
-    groups = groupby(lambda r: r.name.normalized, records)
-    return {
-        name: sorted(map(Release.from_repodata_record, group))
-        for name, group in groups.items()
-    }
-
-
-def filter_releases(predicate, releases):
-    return {
-        name: [r for r in records if predicate(r)] for name, records in releases.items()
-    }
-
-
-def deduplicate_releases(package_info):
-    def deduplicate(releases):
-        return min(releases, key=lambda p: p.timestamp)
-
-    return {
-        name: list(map(deduplicate, groupby(lambda p: p.version, group).values()))
-        for name, group in package_info.items()
-    }
-
-
-def find_policy_versions(policy, releases):
-    return {
-        name: policy.minimum_version(name, package_releases)
-        for name, package_releases in releases.items()
-    }
-
-
-def is_suitable_release(release):
-    if release.timestamp is None:
-        return False
-
-    segments = release.version.extend_to_length(3).segments()
-
-    return segments[2] == [0]
-
-
-def lookup_spec_release(spec, releases):
-    version = spec.version.extend_to_length(3)
-
-    return releases[spec.name][version]
-
-
-def compare_versions(environments, policy_versions):
-    status = {}
-    for env, specs in environments.items():
-        env_status = any(
-            spec.version > policy_versions[spec.name].version for spec in specs
-        )
-        status[env] = env_status
-    return status
-
-
-def version_comparison_symbol(required, policy):
-    if required < policy:
-        return "<"
-    elif required > policy:
-        return ">"
-    else:
-        return "="
-
-
-def format_bump_table(specs, policy_versions, releases, warnings):
-    table = Table(
-        Column("Package", width=20),
-        Column("Required", width=8),
-        "Required (date)",
-        Column("Policy", width=8),
-        "Policy (date)",
-        "Status",
-    )
-
-    heading_style = Style(color="#ff0000", bold=True)
-    warning_style = Style(color="#ffff00", bold=True)
-    styles = {
-        ">": Style(color="#ff0000", bold=True),
-        "=": Style(color="#008700", bold=True),
-        "<": Style(color="#d78700", bold=True),
-    }
-
-    for spec in specs:
-        policy_release = policy_versions[spec.name]
-        policy_version = policy_release.version.with_segments(0, 2)
-        policy_date = policy_release.timestamp
-
-        required_version = spec.version
-        required_date = lookup_spec_release(spec, releases).timestamp
-
-        status = version_comparison_symbol(required_version, policy_version)
-        style = styles[status]
-
-        table.add_row(
-            spec.name,
-            str(required_version),
-            f"{required_date:%Y-%m-%d}",
-            str(policy_version),
-            f"{policy_date:%Y-%m-%d}",
-            status,
-            style=style,
-        )
-
-    grid = Table.grid(expand=True, padding=(0, 2))
-    grid.add_column(style=heading_style, vertical="middle")
-    grid.add_column()
-    grid.add_row("Version summary", table)
-
-    if any(warnings.values()):
-        warning_table = Table(width=table.width, expand=True)
-        warning_table.add_column("Package")
-        warning_table.add_column("Warning")
-
-        for package, messages in warnings.items():
-            if not messages:
-                continue
-            warning_table.add_row(package, messages[0], style=warning_style)
-            for message in messages[1:]:
-                warning_table.add_row("", message, style=warning_style)
-
-        grid.add_row("Warnings", warning_table)
-
-    return grid
-
-
-@click.command()
-@click.argument(
-    "environment_paths",
-    type=click.Path(exists=True, readable=True, path_type=pathlib.Path),
-    nargs=-1,
-)
-def main(environment_paths):
-    console = Console()
-
-    parsed_environments = {
-        path.stem: parse_environment(path.read_text()) for path in environment_paths
-    }
-
-    warnings = {
-        env: dict(warnings_) for env, (_, warnings_) in parsed_environments.items()
-    }
-    environments = {
-        env: [spec for spec in specs if spec.name not in ignored_packages]
-        for env, (specs, _) in parsed_environments.items()
-    }
-
-    all_packages = list(
-        dict.fromkeys(spec.name for spec in concat(environments.values()))
-    )
-
-    policy_months = {
-        "python": 30,
-        "numpy": 18,
-    }
-    policy_months_default = 12
-    overrides = {}
-
-    policy = Policy(
-        policy_months, default_months=policy_months_default, overrides=overrides
-    )
-
-    gateway = Gateway()
-    query = gateway.query(channels, platforms, all_packages, recursive=False)
-    records = asyncio.run(query)
-
-    package_releases = pipe(
-        records,
-        concat,
-        group_packages,
-        curry(filter_releases, lambda r: r.timestamp is not None),
-        deduplicate_releases,
-    )
-    policy_versions = pipe(
-        package_releases,
-        curry(filter_releases, is_suitable_release),
-        curry(find_policy_versions, policy),
-    )
-    status = compare_versions(environments, policy_versions)
-
-    release_lookup = {
-        n: {r.version: r for r in releases} for n, releases in package_releases.items()
-    }
-    grids = {
-        env: format_bump_table(specs, policy_versions, release_lookup, warnings[env])
-        for env, specs in environments.items()
-    }
-    root_grid = Table.grid()
-    root_grid.add_column()
-
-    for env, grid in grids.items():
-        root_grid.add_row(Panel(grid, title=env, expand=True))
-
-    console.print(root_grid)
-
-    status_code = 1 if any(status.values()) else 0
-    sys.exit(status_code)
-
-
-if __name__ == "__main__":
-    main()
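The script removed above composed its release-filtering pipeline from small curried steps via tlz/toolz. A minimal sketch of that pipe/curry pattern, using hypothetical stand-in records rather than the script's Gateway query results:

    from toolz import pipe, curry, groupby

    # Hypothetical stand-in for the conda repodata records the deleted script queried.
    records = [
        {"name": "numpy", "version": "1.26.4"},
        {"name": "numpy", "version": "2.2.0"},
        {"name": "pandas", "version": "2.2.3"},
    ]

    @curry
    def filter_releases(predicate, releases):
        # Mirrors the deleted helper: keep only the matching records per package.
        return {name: [r for r in rs if predicate(r)] for name, rs in releases.items()}

    grouped = pipe(
        records,
        curry(groupby, lambda r: r["name"]),                          # group records by package name
        filter_releases(lambda r: not r["version"].startswith("1.")), # drop 1.x releases
    )
    print(grouped)  # {'numpy': [... '2.2.0' ...], 'pandas': [... '2.2.3' ...]}

Each stage consumes the previous stage's output, which is what let the deleted main() chain group_packages, filter_releases and deduplicate_releases without intermediate variables.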
diff -pruN 2025.03.1-8/ci/policy.yaml 2025.10.1-1/ci/policy.yaml
--- 2025.03.1-8/ci/policy.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/ci/policy.yaml	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,28 @@
+channels:
+  - conda-forge
+platforms:
+  - noarch
+  - linux-64
+policy:
+  # all packages in months
+  packages:
+    python: 30
+    numpy: 18
+  default: 12
+  # overrides for the policy
+  overrides: {}
+  # these packages are completely ignored
+  exclude:
+    - coveralls
+    - pip
+    - pytest
+    - pytest-asyncio
+    - pytest-cov
+    - pytest-env
+    - pytest-mypy-plugins
+    - pytest-timeout
+    - pytest-xdist
+    - pytest-hypothesis
+    - hypothesis
+  # these packages don't fail the CI, but will be printed in the report
+  ignored_violations: []
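The new ci/policy.yaml centralises the month-based support windows that the deleted script hard-coded. A rough, illustrative loader (not the actual CI consumer of this file) showing how the per-package and default month values could be turned into a cutoff date:

    import yaml
    from datetime import datetime, timedelta

    with open("ci/policy.yaml") as f:
        config = yaml.safe_load(f)
    policy = config["policy"]

    def support_window(package: str) -> timedelta:
        # Per-package months (python: 30, numpy: 18), falling back to the 12-month default.
        months = policy["packages"].get(package, policy["default"])
        return timedelta(days=30 * months)  # coarse month approximation

    cutoff = datetime.now() - support_window("numpy")
    print(f"numpy support window starts around {cutoff:%Y-%m-%d}")

Per the file's own comments, packages under exclude are ignored entirely, while ignored_violations are still printed in the report but do not fail CI.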
diff -pruN 2025.03.1-8/ci/release_contributors.py 2025.10.1-1/ci/release_contributors.py
--- 2025.03.1-8/ci/release_contributors.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/ci/release_contributors.py	2025-10-10 10:38:05.000000000 +0000
@@ -7,10 +7,36 @@ from tlz.itertoolz import last, unique
 co_author_re = re.compile(r"Co-authored-by: (?P<name>[^<]+?) <(?P<email>.+)>")
 
 
+ignored = [
+    {"name": "dependabot[bot]"},
+    {"name": "pre-commit-ci[bot]"},
+    {
+        "name": "Claude",
+        "email": [
+            "noreply@anthropic.com",
+            "claude@anthropic.com",
+            "no-reply@anthropic.com",
+        ],
+    },
+]
+
+
+def is_ignored(name, email):
+    # linear search, for now
+    for ignore in ignored:
+        if ignore["name"] != name:
+            continue
+        ignored_email = ignore.get("email")
+        if ignored_email is None or email in ignored_email:
+            return True
+
+    return False
+
+
 def main():
     repo = git.Repo(".")
 
-    most_recent_release = last(repo.tags)
+    most_recent_release = last(list(repo.tags))
 
     # extract information from commits
     contributors = {}
@@ -22,11 +48,8 @@ def main():
 
     # deduplicate and ignore
     # TODO: extract ignores from .github/release.yml
-    ignored = ["dependabot", "pre-commit-ci"]
     unique_contributors = unique(
-        contributor
-        for contributor in contributors.values()
-        if contributor.removesuffix("[bot]") not in ignored
+        name for email, name in contributors.items() if not is_ignored(name, email)
     )
 
     sorted_ = sorted(unique_contributors)
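A quick sanity check of the new is_ignored() filter, with hypothetical name/email pairs (the bot entries match on name alone; the "Claude" entry additionally requires one of the listed emails):

    assert is_ignored("dependabot[bot]", "12345+dependabot[bot]@users.noreply.github.com")
    assert is_ignored("Claude", "noreply@anthropic.com")
    assert not is_ignored("Claude", "someone@example.com")  # name matches, email does not
    assert not is_ignored("Jane Doe", "jane@example.com")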
diff -pruN 2025.03.1-8/ci/requirements/all-but-dask.yml 2025.10.1-1/ci/requirements/all-but-dask.yml
--- 2025.03.1-8/ci/requirements/all-but-dask.yml	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/ci/requirements/all-but-dask.yml	2025-10-10 10:38:05.000000000 +0000
@@ -4,7 +4,7 @@ channels:
   - nodefaults
 dependencies:
   - aiobotocore
-  - array-api-strict
+  - array-api-strict<2.4
   - boto3
   - bottleneck
   - cartopy
@@ -28,6 +28,7 @@ dependencies:
   - pip
   - pydap
   - pytest
+  - pytest-asyncio
   - pytest-cov
   - pytest-env
   - pytest-mypy-plugins
diff -pruN 2025.03.1-8/ci/requirements/all-but-numba.yml 2025.10.1-1/ci/requirements/all-but-numba.yml
--- 2025.03.1-8/ci/requirements/all-but-numba.yml	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/ci/requirements/all-but-numba.yml	2025-10-10 10:38:05.000000000 +0000
@@ -4,9 +4,9 @@ channels:
   - nodefaults
 dependencies:
   # Pin a "very new numpy" (updated Sept 24, 2024)
-  - numpy>=2.1.1
+  - numpy>=2.2
   - aiobotocore
-  - array-api-strict
+  - array-api-strict<2.4
   - boto3
   - bottleneck
   - cartopy
@@ -41,6 +41,7 @@ dependencies:
   - pyarrow # pandas raises a deprecation warning without this, breaking doctests
   - pydap
   - pytest
+  - pytest-asyncio
   - pytest-cov
   - pytest-env
   - pytest-mypy-plugins
diff -pruN 2025.03.1-8/ci/requirements/bare-min-and-scipy.yml 2025.10.1-1/ci/requirements/bare-min-and-scipy.yml
--- 2025.03.1-8/ci/requirements/bare-min-and-scipy.yml	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/ci/requirements/bare-min-and-scipy.yml	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,19 @@
+name: xarray-tests
+channels:
+  - conda-forge
+  - nodefaults
+dependencies:
+  - python=3.11
+  - coveralls
+  - pip
+  - pytest
+  - pytest-asyncio
+  - pytest-cov
+  - pytest-env
+  - pytest-mypy-plugins
+  - pytest-timeout
+  - pytest-xdist
+  - numpy=1.26
+  - packaging=24.1
+  - pandas=2.2
+  - scipy=1.13
diff -pruN 2025.03.1-8/ci/requirements/bare-minimum.yml 2025.10.1-1/ci/requirements/bare-minimum.yml
--- 2025.03.1-8/ci/requirements/bare-minimum.yml	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/ci/requirements/bare-minimum.yml	2025-10-10 10:38:05.000000000 +0000
@@ -3,15 +3,16 @@ channels:
   - conda-forge
   - nodefaults
 dependencies:
-  - python=3.10
+  - python=3.11
   - coveralls
   - pip
   - pytest
+  - pytest-asyncio
   - pytest-cov
   - pytest-env
   - pytest-mypy-plugins
   - pytest-timeout
   - pytest-xdist
-  - numpy=1.24
-  - packaging=23.1
-  - pandas=2.1
+  - numpy=1.26
+  - packaging=24.1
+  - pandas=2.2
diff -pruN 2025.03.1-8/ci/requirements/doc.yml 2025.10.1-1/ci/requirements/doc.yml
--- 2025.03.1-8/ci/requirements/doc.yml	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/ci/requirements/doc.yml	2025-10-10 10:38:05.000000000 +0000
@@ -4,49 +4,50 @@ channels:
   - conda-forge
   - nodefaults
 dependencies:
-  - python=3.12
+  - python
   - bottleneck
   - cartopy
   - cfgrib
   - kerchunk
-  - dask-core>=2022.1
-  - hypothesis>=6.75.8
-  - h5netcdf>=0.13
+  - dask-core
+  - hypothesis
+  - h5netcdf
   - ipykernel
   - ipywidgets # silence nbsphinx warning
   - ipython
-  - iris>=2.3
+  - iris
   - jupyter_client
+  - jupyter_sphinx
   - matplotlib-base
   - nbsphinx
   - ncdata
-  - netcdf4>=1.5
+  - netcdf4
   - numba
-  - numpy>=2
-  - packaging>=23.2
-  - pandas>=1.4,!=2.1.0
+  - numpy>=2.2
+  - packaging
+  - pandas
   - pooch
   - pip
   - pre-commit
   - pyarrow
+  - pydata-sphinx-theme
   - pyproj
   - rich # for Zarr tree()
-  - scipy!=1.10.0
+  - scipy
   - seaborn
   - setuptools
   - sparse
   - sphinx-autosummary-accessors
-  - sphinx-book-theme<=1.0.1
   - sphinx-copybutton
   - sphinx-design
   - sphinx-inline-tabs
-  - sphinx>=5.0,<7.0 # https://github.com/executablebooks/sphinx-book-theme/issues/749
+  - sphinx>=6,<8
+  - sphinxcontrib-mermaid
   - sphinxcontrib-srclinks
   - sphinx-remove-toctrees
   - sphinxext-opengraph
   - sphinxext-rediraffe
-  - zarr>=2.10
+  - zarr
   - pip:
-      - sphinxcontrib-mermaid
       # relative to this file. Needs to be editable to be accepted.
       - -e ../..
diff -pruN 2025.03.1-8/ci/requirements/environment-3.14.yml 2025.10.1-1/ci/requirements/environment-3.14.yml
--- 2025.03.1-8/ci/requirements/environment-3.14.yml	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/ci/requirements/environment-3.14.yml	2025-10-10 10:38:05.000000000 +0000
@@ -4,7 +4,7 @@ channels:
   - nodefaults
 dependencies:
   - aiobotocore
-  - array-api-strict
+  - array-api-strict<2.4
   - boto3
   - bottleneck
   - cartopy
@@ -37,6 +37,7 @@ dependencies:
   - pyarrow # pandas raises a deprecation warning without this, breaking doctests
   - pydap
   - pytest
+  - pytest-asyncio
   - pytest-cov
   - pytest-env
   - pytest-mypy-plugins
diff -pruN 2025.03.1-8/ci/requirements/environment-benchmark.yml 2025.10.1-1/ci/requirements/environment-benchmark.yml
--- 2025.03.1-8/ci/requirements/environment-benchmark.yml	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/ci/requirements/environment-benchmark.yml	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,23 @@
+name: xarray-benchmark
+channels:
+  - conda-forge
+  - nodefaults
+dependencies:
+  - bottleneck
+  - cftime
+  - dask-core
+  - distributed
+  - flox
+  - netcdf4
+  - numba
+  - numbagg
+  - numexpr
+  - numpy>=2.2,<2.3 # https://github.com/numba/numba/issues/10105
+  - opt_einsum
+  - packaging
+  - pandas
+  - pyarrow # pandas raises a deprecation warning without this, breaking doctests
+  - sparse
+  - scipy
+  - toolz
+  - zarr
diff -pruN 2025.03.1-8/ci/requirements/environment-windows-3.14.yml 2025.10.1-1/ci/requirements/environment-windows-3.14.yml
--- 2025.03.1-8/ci/requirements/environment-windows-3.14.yml	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/ci/requirements/environment-windows-3.14.yml	2025-10-10 10:38:05.000000000 +0000
@@ -2,7 +2,7 @@ name: xarray-tests
 channels:
   - conda-forge
 dependencies:
-  - array-api-strict
+  - array-api-strict<2.4
   - boto3
   - bottleneck
   - cartopy
@@ -32,6 +32,7 @@ dependencies:
   - pyarrow # importing dask.dataframe raises an ImportError without this
   - pydap
   - pytest
+  - pytest-asyncio
   - pytest-cov
   - pytest-env
   - pytest-mypy-plugins
diff -pruN 2025.03.1-8/ci/requirements/environment-windows.yml 2025.10.1-1/ci/requirements/environment-windows.yml
--- 2025.03.1-8/ci/requirements/environment-windows.yml	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/ci/requirements/environment-windows.yml	2025-10-10 10:38:05.000000000 +0000
@@ -2,7 +2,7 @@ name: xarray-tests
 channels:
   - conda-forge
 dependencies:
-  - array-api-strict
+  - array-api-strict<2.4
   - boto3
   - bottleneck
   - cartopy
@@ -32,6 +32,7 @@ dependencies:
   - pyarrow # importing dask.dataframe raises an ImportError without this
   - pydap
   - pytest
+  - pytest-asyncio
   - pytest-cov
   - pytest-env
   - pytest-mypy-plugins
diff -pruN 2025.03.1-8/ci/requirements/environment.yml 2025.10.1-1/ci/requirements/environment.yml
--- 2025.03.1-8/ci/requirements/environment.yml	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/ci/requirements/environment.yml	2025-10-10 10:38:05.000000000 +0000
@@ -4,7 +4,7 @@ channels:
   - nodefaults
 dependencies:
   - aiobotocore
-  - array-api-strict
+  - array-api-strict<2.4
   - boto3
   - bottleneck
   - cartopy
@@ -20,12 +20,13 @@ dependencies:
   - iris
   - lxml # Optional dep of pydap
   - matplotlib-base
+  - mypy==1.18.1
   - nc-time-axis
   - netcdf4
   - numba
   - numbagg
   - numexpr
-  - numpy>=2
+  - numpy>=2.2
   - opt_einsum
   - packaging
   - pandas
@@ -38,6 +39,7 @@ dependencies:
   - pydap
   - pydap-server
   - pytest
+  - pytest-asyncio
   - pytest-cov
   - pytest-env
   - pytest-mypy-plugins
@@ -55,6 +57,7 @@ dependencies:
   - types-python-dateutil
   - types-pytz
   - types-PyYAML
+  - types-requests
   - types-setuptools
   - types-openpyxl
   - typing_extensions
diff -pruN 2025.03.1-8/ci/requirements/min-all-deps.yml 2025.10.1-1/ci/requirements/min-all-deps.yml
--- 2025.03.1-8/ci/requirements/min-all-deps.yml	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/ci/requirements/min-all-deps.yml	2025-10-10 10:38:05.000000000 +0000
@@ -7,52 +7,48 @@ dependencies:
   # Run ci/min_deps_check.py to verify that this file respects the policy.
   # When upgrading python, numpy, or pandas, must also change
   # doc/user-guide/installing.rst, doc/user-guide/plotting.rst and setup.py.
-  - python=3.10
-  - array-api-strict=1.0 # dependency for testing the array api compat
-  - boto3=1.29
-  - bottleneck=1.3
-  - cartopy=0.22
+  - python=3.11
+  - array-api-strict=1.1 # dependency for testing the array api compat
+  - boto3=1.34
+  - bottleneck=1.4
+  - cartopy=0.23
   - cftime=1.6
   - coveralls
-  - dask-core=2023.11
-  - distributed=2023.11
-  # Flox > 0.8 has a bug with numbagg versions
-  # It will require numbagg > 0.6
-  # so we should just skip that series eventually
-  # or keep flox pinned for longer than necessary
-  - flox=0.7
+  - dask-core=2024.6
+  - distributed=2024.6
+  - flox=0.9
   - h5netcdf=1.3
   # h5py and hdf5 tend to cause conflicts
   # for e.g. hdf5 1.12 conflicts with h5py=3.1
   # prioritize bumping other packages instead
-  - h5py=3.8
-  - hdf5=1.12
+  - h5py=3.11
+  - hdf5=1.14
   - hypothesis
-  - iris=3.7
-  - lxml=4.9 # Optional dep of pydap
+  - iris=3.9
+  - lxml=5.1 # Optional dep of pydap
   - matplotlib-base=3.8
   - nc-time-axis=1.4
   # netcdf follows a 1.major.minor[.patch] convention
   # (see https://github.com/Unidata/netcdf4-python/issues/1090)
-  - netcdf4=1.6.0
-  - numba=0.57
-  - numbagg=0.6
-  - numpy=1.24
-  - packaging=23.2
-  - pandas=2.1
-  - pint=0.22
+  - netcdf4=1.6
+  - numba=0.60
+  - numbagg=0.8
+  - numpy=1.26
+  - packaging=24.1
+  - pandas=2.2
+  - pint=0.24
   - pip
-  - pydap=3.4
+  - pydap=3.5.0
   - pytest
+  - pytest-asyncio
   - pytest-cov
   - pytest-env
   - pytest-mypy-plugins
   - pytest-timeout
   - pytest-xdist
   - rasterio=1.3
-  - scipy=1.11
+  - scipy=1.13
   - seaborn=0.13
-  - sparse=0.14
+  - sparse=0.15
   - toolz=0.12
-  - typing_extensions=4.8
-  - zarr=2.16
+  - zarr=2.18
diff -pruN 2025.03.1-8/conftest.py 2025.10.1-1/conftest.py
--- 2025.03.1-8/conftest.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/conftest.py	2025-10-10 10:38:05.000000000 +0000
@@ -42,6 +42,12 @@ def pytest_collection_modifyitems(items)
 
 
 @pytest.fixture(autouse=True)
+def set_zarr_v3_api(monkeypatch):
+    """Set ZARR_V3_EXPERIMENTAL_API environment variable for all tests."""
+    monkeypatch.setenv("ZARR_V3_EXPERIMENTAL_API", "1")
+
+
+@pytest.fixture(autouse=True)
 def add_standard_imports(doctest_namespace, tmpdir):
     import numpy as np
     import pandas as pd
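Because the new conftest fixture is marked autouse, every collected test sees ZARR_V3_EXPERIMENTAL_API without requesting the fixture explicitly, and monkeypatch undoes the change after each test. A minimal illustration (hypothetical test, not part of the suite):

    import os

    def test_zarr_v3_flag_is_visible():
        # set_zarr_v3_api has already run for this test via the autouse fixture.
        assert os.environ["ZARR_V3_EXPERIMENTAL_API"] == "1"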
diff -pruN 2025.03.1-8/debian/changelog 2025.10.1-1/debian/changelog
--- 2025.03.1-8/debian/changelog	2025-05-05 09:04:03.000000000 +0000
+++ 2025.10.1-1/debian/changelog	2025-10-22 11:37:31.000000000 +0000
@@ -1,3 +1,66 @@
+python-xarray (2025.10.1-1) unstable; urgency=medium
+
+  * New upstream release
+  * xfails-that-break-pytest.patch: fixed upstream
+
+ -- Alastair McKinstry <mckinstry@debian.org>  Wed, 22 Oct 2025 12:37:31 +0100
+
+python-xarray (2025.09.0-1) unstable; urgency=medium
+
+  * Team upload.
+  * New upstream version
+  * Refreshed patches. Dropped disable-netcdf-roundtrip-tests.patch in
+    favor of upstream's fix.
+  * d/rules: skip mypy tests, they are only for upstream devs.
+    Closes: #1115136, #1115691
+
+ -- Michael R. Crusoe <crusoe@debian.org>  Mon, 22 Sep 2025 13:02:51 +0200
+
+python-xarray (2025.08.0-3) unstable; urgency=medium
+
+  * Team upload.
+  * Added patch to workaround slow debci runners.
+
+ -- Michael R. Crusoe <crusoe@debian.org>  Thu, 04 Sep 2025 16:05:37 +0200
+
+python-xarray (2025.08.0-2) unstable; urgency=medium
+
+  * Team upload.
+  * pybuild-autopkgtest: skip on 32-bits archs, they are unsupported.
+  * d/rules: resume running the tests in parallel using
+    DEB_BUILD_OPTION_PARALLEL
+  * Remove trailing whitespace in debian/changelog (routine-update)
+  * Remove trailing whitespace in debian/rules (routine-update)
+  * Set upstream metadata fields: Documentation, Repository.
+  * Remove unused license definitions for Python-2.0, BSD-3-Clause.
+  * Fix day-of-week for changelog entry 2024.07.0-1.
+
+ -- Michael R. Crusoe <crusoe@debian.org>  Tue, 02 Sep 2025 21:14:38 +0200
+
+python-xarray (2025.08.0-1) unstable; urgency=medium
+
+  * New upstream release. Closes: #1110804
+  * B-D on python3-pytest-asyncio, drop sparse again for now

+  * Drop zarr support this release
+
+ -- Alastair McKinstry <mckinstry@debian.org>  Wed, 20 Aug 2025 09:14:38 +0100
+
+python-xarray (2025.06.1-1) experimental; urgency=medium
+
+  * New upstream release
+  * No longer conflict with python3-sparse, instead build-depend
+  * Add docs for hypothesis, matplotlib, zarr
+  * Don't build docs for this release
+
+ -- Alastair McKinstry <mckinstry@debian.org>  Sun, 15 Jun 2025 10:37:14 +0100
+
+python-xarray (2025.04.0-1) experimental; urgency=medium
+
+  * New upstream release
+  * Build-dep on python3-jupyter-sphinx
+
+ -- Alastair McKinstry <mckinstry@debian.org>  Tue, 13 May 2025 11:21:57 +0100
+
 python-xarray (2025.03.1-8) unstable; urgency=medium
 
   * Team upload
@@ -68,7 +131,7 @@ python-xarray (2025.03.0-1) unstable; ur
   * Standards-Version: 4.7.2
   * import-pytest.patch - fix errors on missing import
   * 2 more dask tests labeled xfail
-	
+
  -- Alastair McKinstry <mckinstry@debian.org>  Wed, 26 Mar 2025 13:04:52 +0000
 
 python-xarray (2025.01.2-2) unstable; urgency=medium
@@ -154,7 +217,7 @@ python-xarray (2024.07.0-1) unstable; ur
     for pytest 8. Closes: #1063981
   * Annotate python3-cfgrib as <!nocheck>. Closes: #1078351
 
- -- Alastair McKinstry <mckinstry@debian.org>  Tue, 19 Aug 2024 09:30:21 +0100
+ -- Alastair McKinstry <mckinstry@debian.org>  Mon, 19 Aug 2024 09:30:21 +0100
 
 python-xarray (2024.06.0-1) experimental; urgency=medium
 
@@ -170,8 +233,8 @@ python-xarray (2024.03.0-1) unstable; ur
   * New upstream release
   * Re-enable docs
   * Build-dep on new python3-sphinx-copybutton, python3-sphinxext-rediraffe,
-    python3-sphinxext-opengraph, python3-sphinx-design, 
-    python3-sphinx-book-theme 
+    python3-sphinxext-opengraph, python3-sphinx-design,
+    python3-sphinx-book-theme
   * Drop no-sphinx-* patches
 
  -- Alastair McKinstry <mckinstry@debian.org>  Sun, 31 Mar 2024 08:17:40 +0100
diff -pruN 2025.03.1-8/debian/control 2025.10.1-1/debian/control
--- 2025.03.1-8/debian/control	2025-05-05 09:04:03.000000000 +0000
+++ 2025.10.1-1/debian/control	2025-10-22 11:37:31.000000000 +0000
@@ -3,6 +3,7 @@ Maintainer: Debian Science Maintainers <
 Uploaders: Ghislain Antony Vaillant <ghisvail@gmail.com>,
            Alastair McKinstry <mckinstry@debian.org>
 Section: python
+Testsuite: autopkgtest-pkg-pybuild
 Priority: optional
 Build-Depends: debhelper-compat (= 13),
                dh-sequence-python3,
@@ -18,9 +19,9 @@ Build-Depends: debhelper-compat (= 13),
                python3-sphinxcontrib-mermaid <!nodoc>,
                python3-sphinx-remove-toctrees <!nodoc>,
                python3-autodocsumm <!nodoc>,
+               python3-jupyter-sphinx <!nodoc>,
                python3-all,
                python3-cfgrib [!i386 !armel !armhf !hurd-i386 !s390x] <!nocheck>,
-               python3-zarr (>= 3.0.6-2),
                python3-h5netcdf <!nocheck>,
                python3-toolz <!nocheck>,
                python3-pickleshare <!nodoc>,
@@ -32,16 +33,18 @@ Build-Depends: debhelper-compat (= 13),
                python3-dask <!nocheck> <!nodoc>,
                python3-ipython <!nodoc>,
                python3-matplotlib <!nocheck> <!nodoc>,
+               python-matplotlib-doc <!nocheck> <!nodoc>,
                python3-netcdf4 <!nocheck> <!nodoc>,
                python3-numpy,
                python3-numpydoc <!nocheck> <!nodoc>,
                python3-pandas,
                python3-pytest <!nocheck>,
+               python3-pytest-asyncio <!nocheck>,
                python3-pytest-xdist <!nocheck>,
-	       python3-pytest-mypy <!nocheck>,
+               python3-pytest-mypy <!nocheck>,
                mypy <!nocheck>,
-	       tzdata-legacy <!nocheck>,
-	       python3-pint <!nocheck>,
+               tzdata-legacy <!nocheck>,
+               python3-pint <!nocheck>,
                python3-scipy <!nocheck> <!nodoc>,
                python3-seaborn <!nocheck> <!nodoc>,
                python3-sphinx <!nodoc>,
@@ -63,14 +66,12 @@ Build-Depends: debhelper-compat (= 13),
                python3-packaging
 Build-Conflicts: python3-pint (<= 0.21~1),
                  python3-pytest-lazy-fixture,
-                 python3-sparse,
                  python3-xarray-datatree
 Standards-Version: 4.7.2
 Vcs-Browser: https://salsa.debian.org/science-team/python-xarray
 Vcs-Git: https://salsa.debian.org/science-team/python-xarray.git
 Homepage: https://xarray.pydata.org/
 Rules-Requires-Root: no
-Testsuite: autopkgtest-pkg-pybuild
 
 Package: python3-xarray
 Architecture: all
diff -pruN 2025.03.1-8/debian/copyright 2025.10.1-1/debian/copyright
--- 2025.03.1-8/debian/copyright	2025-05-05 09:04:03.000000000 +0000
+++ 2025.10.1-1/debian/copyright	2025-10-22 11:37:31.000000000 +0000
@@ -24,226 +24,6 @@ License: Apache-2.0
  On Debian systems, the complete text of the Apache License,
  Version 2.0 can be found in '/usr/share/common-licenses/Apache-2.0'.
 
-License: BSD-3-Clause
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
- .
- 1. Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
- .
- 2. Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
- .
- 3. Neither the name of the copyright holder nor the names of its contributors
- may be used to endorse or promote products derived from this software without
- specific prior written permission.
- .
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
- FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-License: Python-2.0
- PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
- --------------------------------------------
- .
- 1. This LICENSE AGREEMENT is between the Python Software Foundation
- ("PSF"), and the Individual or Organization ("Licensee") accessing and
- otherwise using this software ("Python") in source or binary form and
- its associated documentation.
- .
- 2. Subject to the terms and conditions of this License Agreement, PSF
- hereby grants Licensee a nonexclusive, royalty-free, world-wide
- license to reproduce, analyze, test, perform and/or display publicly,
- prepare derivative works, distribute, and otherwise use Python
- alone or in any derivative version, provided, however, that PSF's
- License Agreement and PSF's notice of copyright, i.e., "Copyright (c)
- 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation; All Rights
- Reserved" are retained in Python alone or in any derivative version
- prepared by Licensee.
- .
- 3. In the event Licensee prepares a derivative work that is based on
- or incorporates Python or any part thereof, and wants to make
- the derivative work available to others as provided herein, then
- Licensee hereby agrees to include in any such work a brief summary of
- the changes made to Python.
- .
- 4. PSF is making Python available to Licensee on an "AS IS"
- basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
- IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
- DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
- FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
- INFRINGE ANY THIRD PARTY RIGHTS.
- .
- 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
- FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
- A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
- OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
- .
- 6. This License Agreement will automatically terminate upon a material
- breach of its terms and conditions.
- .
- 7. Nothing in this License Agreement shall be deemed to create any
- relationship of agency, partnership, or joint venture between PSF and
- Licensee. This License Agreement does not grant permission to use PSF
- trademarks or trade name in a trademark sense to endorse or promote
- products or services of Licensee, or any third party.
- .
- 8. By copying, installing or otherwise using Python, Licensee
- agrees to be bound by the terms and conditions of this License
- Agreement.
- .
- BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0
- -------------------------------------------
- .
- BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1
- .
- 1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an
- office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the
- Individual or Organization ("Licensee") accessing and otherwise using
- this software in source or binary form and its associated
- documentation ("the Software").
- .
- 2. Subject to the terms and conditions of this BeOpen Python License
- Agreement, BeOpen hereby grants Licensee a non-exclusive,
- royalty-free, world-wide license to reproduce, analyze, test, perform
- and/or display publicly, prepare derivative works, distribute, and
- otherwise use the Software alone or in any derivative version,
- provided, however, that the BeOpen Python License is retained in the
- Software, alone or in any derivative version prepared by Licensee.
- .
- 3. BeOpen is making the Software available to Licensee on an "AS IS"
- basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
- IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND
- DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
- FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT
- INFRINGE ANY THIRD PARTY RIGHTS.
- .
- 4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE
- SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS
- AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY
- DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
- .
- 5. This License Agreement will automatically terminate upon a material
- breach of its terms and conditions.
- .
- 6. This License Agreement shall be governed by and interpreted in all
- respects by the law of the State of California, excluding conflict of
- law provisions. Nothing in this License Agreement shall be deemed to
- create any relationship of agency, partnership, or joint venture
- between BeOpen and Licensee. This License Agreement does not grant
- permission to use BeOpen trademarks or trade names in a trademark
- sense to endorse or promote products or services of Licensee, or any
- third party. As an exception, the "BeOpen Python" logos available at
- http://www.pythonlabs.com/logos.html may be used according to the
- permissions granted on that web page.
- .
- 7. By copying, installing or otherwise using the software, Licensee
- agrees to be bound by the terms and conditions of this License
- Agreement.
- .
- CNRI OPEN SOURCE LICENSE AGREEMENT (for Python 1.6b1)
- --------------------------------------------------
- .
- IMPORTANT: PLEASE READ THE FOLLOWING AGREEMENT CAREFULLY.
- .
- BY CLICKING ON "ACCEPT" WHERE INDICATED BELOW, OR BY COPYING,
- INSTALLING OR OTHERWISE USING PYTHON 1.6, beta 1 SOFTWARE, YOU ARE
- DEEMED TO HAVE AGREED TO THE TERMS AND CONDITIONS OF THIS LICENSE
- AGREEMENT.
- .
- 1. This LICENSE AGREEMENT is between the Corporation for National
- Research Initiatives, having an office at 1895 Preston White Drive,
- Reston, VA 20191 ("CNRI"), and the Individual or Organization
- ("Licensee") accessing and otherwise using Python 1.6, beta 1
- software in source or binary form and its associated documentation,
- as released at the www.python.org Internet site on August 4, 2000
- ("Python 1.6b1").
- .
- 2. Subject to the terms and conditions of this License Agreement, CNRI
- hereby grants Licensee a non-exclusive, royalty-free, world-wide
- license to reproduce, analyze, test, perform and/or display
- publicly, prepare derivative works, distribute, and otherwise use
- Python 1.6b1 alone or in any derivative version, provided, however,
- that CNRIs License Agreement is retained in Python 1.6b1, alone or
- in any derivative version prepared by Licensee.
- .
- Alternately, in lieu of CNRIs License Agreement, Licensee may
- substitute the following text (omitting the quotes): "Python 1.6,
- beta 1, is made available subject to the terms and conditions in
- CNRIs License Agreement. This Agreement may be located on the
- Internet using the following unique, persistent identifier (known
- as a handle): 1895.22/1011. This Agreement may also be obtained
- from a proxy server on the Internet using the
- URL:http://hdl.handle.net/1895.22/1011".
- .
- 3. In the event Licensee prepares a derivative work that is based on
- or incorporates Python 1.6b1 or any part thereof, and wants to make
- the derivative work available to the public as provided herein,
- then Licensee hereby agrees to indicate in any such work the nature
- of the modifications made to Python 1.6b1.
- .
- 4. CNRI is making Python 1.6b1 available to Licensee on an "AS IS"
- basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
- IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND
- DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR
- FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6b1
- WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
- .
- 5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE
- SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR
- LOSS AS A RESULT OF USING, MODIFYING OR DISTRIBUTING PYTHON 1.6b1,
- OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY
- THEREOF.
- .
- 6. This License Agreement will automatically terminate upon a material
- breach of its terms and conditions.
- .
- 7. This License Agreement shall be governed by and interpreted in all
- respects by the law of the State of Virginia, excluding conflict of
- law provisions. Nothing in this License Agreement shall be deemed
- to create any relationship of agency, partnership, or joint venture
- between CNRI and Licensee. This License Agreement does not grant
- permission to use CNRI trademarks or trade name in a trademark
- sense to endorse or promote products or services of Licensee, or
- any third party.
- .
- 8. By clicking on the "ACCEPT" button where indicated, or by copying,
- installing or otherwise using Python 1.6b1, Licensee agrees to be
- bound by the terms and conditions of this License Agreement.
- .
- ACCEPT
- .
- CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2
- --------------------------------------------------
- .
- Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,
- The Netherlands. All rights reserved.
- .
- Permission to use, copy, modify, and distribute this software and its
- documentation for any purpose and without fee is hereby granted,
- provided that the above copyright notice appear in all copies and that
- both that copyright notice and this permission notice appear in
- supporting documentation, and that the name of Stichting Mathematisch
- Centrum or CWI not be used in advertising or publicity pertaining to
- distribution of the software without specific, written prior
- permission.
- .
- STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO
- THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
- FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE
- FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
- OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
 Files: xarray/tests/test_dask.py
  xarray/compat/dask_array_ops.py
 Copyright:  2014-2018, Anaconda, Inc. and contributors
diff -pruN 2025.03.1-8/debian/patches/Use-packaged-MathJax.patch 2025.10.1-1/debian/patches/Use-packaged-MathJax.patch
--- 2025.03.1-8/debian/patches/Use-packaged-MathJax.patch	2025-05-05 09:04:03.000000000 +0000
+++ 2025.10.1-1/debian/patches/Use-packaged-MathJax.patch	2025-10-22 11:37:31.000000000 +0000
@@ -11,13 +11,11 @@ Forwarded: not-needed
  doc/conf.py | 4 ++++
  1 file changed, 4 insertions(+)
 
-diff --git a/doc/conf.py b/doc/conf.py
-index 3561ffe..e2fe5be 100644
 --- a/doc/conf.py
 +++ b/doc/conf.py
-@@ -190,6 +190,10 @@ napoleon_type_aliases = {
+@@ -185,6 +185,10 @@
  # mermaid config
- mermaid_version = "10.9.1"
+ mermaid_version = "11.6.0"
  
 +mathjax_path = 'file:///usr/share/javascript/mathjax/MathJax.js?config=TeX-AMS-MML_HTMLorMML'
 +
diff -pruN 2025.03.1-8/debian/patches/Use-packaged-docs.patch 2025.10.1-1/debian/patches/Use-packaged-docs.patch
--- 2025.03.1-8/debian/patches/Use-packaged-docs.patch	2025-05-05 09:04:03.000000000 +0000
+++ 2025.10.1-1/debian/patches/Use-packaged-docs.patch	2025-10-22 11:37:31.000000000 +0000
@@ -12,77 +12,24 @@ Updated 2025-02-02 to disable use of sph
 
 --- a/doc/conf.py
 +++ b/doc/conf.py
-@@ -58,7 +58,7 @@ except ImportError:
-         ]
-     )
- 
+@@ -87,7 +87,7 @@
+ # NBSphinx configuration
+ nbsphinx_timeout = 600
+ nbsphinx_execute = "always"
 -nbsphinx_allow_errors = False
 +nbsphinx_allow_errors = True
  nbsphinx_requirejs_path = ""
- 
- # -- General configuration ------------------------------------------------
-@@ -229,7 +229,7 @@ pygments_style = "sphinx"
- # -- Options for HTML output ----------------------------------------------
- # The theme to use for HTML and HTML Help pages.  See the documentation for
- # a list of builtin themes.
--html_theme = "sphinx_book_theme"
-+
- html_title = ""
- 
- html_context = {
-@@ -242,25 +242,25 @@ html_context = {
- # Theme options are theme-specific and customize the look and feel of a theme
- # further.  For a list of options available for each theme, see the
- # documentation.
--html_theme_options = dict(
--    # analytics_id=''  this is configured in rtfd.io
--    # canonical_url="",
--    repository_url="https://github.com/pydata/xarray",
--    repository_branch="main",
--    navigation_with_keys=False,  # pydata/pydata-sphinx-theme#1492
--    navigation_depth=4,
--    path_to_docs="doc",
--    use_edit_page_button=True,
--    use_repository_button=True,
--    use_issues_button=True,
--    home_page_in_toc=False,
--    extra_footer="""<p>Xarray is a fiscally sponsored project of <a href="https://numfocus.org">NumFOCUS</a>,
--    a nonprofit dedicated to supporting the open-source scientific computing community.<br>
--    Theme by the <a href="https://ebp.jupyterbook.org">Executable Book Project</a></p>""",
--    twitter_url="https://twitter.com/xarray_dev",
--    icon_links=[],  # workaround for pydata/pydata-sphinx-theme#1220
--    # announcement="<a href='https://forms.gle/KEq7WviCdz9xTaJX6'>Xarray's 2024 User Survey is live now. Please take ~5 minutes to fill it out and help us improve Xarray.</a>",
--)
-+# html_theme_options = dict(
-+#     # analytics_id=''  this is configured in rtfd.io
-+#     # canonical_url="",
-+#     repository_url="https://github.com/pydata/xarray",
-+#     repository_branch="main",
-+#     navigation_with_keys=False,  # pydata/pydata-sphinx-theme#1492
-+#     navigation_depth=4,
-+#     path_to_docs="doc",
-+#     use_edit_page_button=True,
-+#     use_repository_button=True,
-+#     use_issues_button=True,
-+#     home_page_in_toc=False,
-+#     extra_footer="""<p>Xarray is a fiscally sponsored project of <a href="https://numfocus.org">NumFOCUS</a>,
-+#     a nonprofit dedicated to supporting the open-source scientific computing community.<br>
-+#     Theme by the <a href="https://ebp.jupyterbook.org">Executable Book Project</a></p>""",
-+#     twitter_url="https://twitter.com/xarray_dev",
-+#     icon_links=[],  # workaround for pydata/pydata-sphinx-theme#1220
-+#     # announcement="<a href='https://forms.gle/KEq7WviCdz9xTaJX6'>Xarray's 2024 User Survey is live now. Please take ~5 minutes to fill it out and help us improve Xarray.</a>",
-+# )
- 
- 
- # The name of an image file (relative to this directory) to place at the top
-@@ -335,16 +335,16 @@ htmlhelp_basename = "xarraydoc"
+ #  png2x/retina rendering of figues in docs would also need to modify custom.css:
+ # https://github.com/spatialaudio/nbsphinx/issues/464#issuecomment-652729126
+@@ -326,19 +326,19 @@
  intersphinx_mapping = {
      "cftime": ("https://unidata.github.io/cftime", None),
      "cubed": ("https://cubed-dev.github.io/cubed/", None),
 -    "dask": ("https://docs.dask.org/en/latest", None),
 +    "dask": ("https://docs.dask.org/en/latest", "/usr/share/doc/python-dask-doc/html/objects.inv"),
      "flox": ("https://flox.readthedocs.io/en/latest/", None),
-     "hypothesis": ("https://hypothesis.readthedocs.io/en/latest/", None),
+-    "hypothesis": ("https://hypothesis.readthedocs.io/en/latest/", None),
++    "hypothesis": ("https://hypothesis.readthedocs.io/en/latest/", "/usr/share/doc/python-hypothesis-doc/html/objects.inv",
      "iris": ("https://scitools-iris.readthedocs.io/en/latest", None),
 -    "matplotlib": ("https://matplotlib.org/stable/", None),
 -    "numba": ("https://numba.readthedocs.io/en/stable/", None),
@@ -94,7 +41,11 @@ Updated 2025-02-02 to disable use of sph
 +    "numpy": ("https://numpy.org/doc/stable", '/usr/share/doc/numba-doc/html/objects.inv'),
      "scipy": ("https://docs.scipy.org/doc/scipy", None),
 +    "numba": ("https://numba.pydata.org/numba-doc/latest", '/usr/share/doc/numba-doc/html/objects.inv'),
-+    "matplotlib": ("https://matplotlib.org/stable/",  None),
++    "matplotlib": ("https://matplotlib.org/stable/",  "/usr/share/doc/python-matplotlib-doc/html/objects.inv"),
      "sparse": ("https://sparse.pydata.org/en/latest/", None),
      "xarray-tutorial": ("https://tutorial.xarray.dev/", None),
-     "zarr": ("https://zarr.readthedocs.io/en/stable/", None),
+-    "zarr": ("https://zarr.readthedocs.io/en/stable/", None),
++    "zarr": ("https://zarr.readthedocs.io/en/stable/", "/usr/share/doc/python3-zarr/html/objects.inv"),
+     "xarray-lmfit": ("https://xarray-lmfit.readthedocs.io/stable", None),
+ }
+ 
diff -pruN 2025.03.1-8/debian/patches/disable-cftime-false-standard-calendar-tests.patch 2025.10.1-1/debian/patches/disable-cftime-false-standard-calendar-tests.patch
--- 2025.03.1-8/debian/patches/disable-cftime-false-standard-calendar-tests.patch	2025-05-05 09:04:03.000000000 +0000
+++ 2025.10.1-1/debian/patches/disable-cftime-false-standard-calendar-tests.patch	2025-10-22 11:37:31.000000000 +0000
@@ -6,11 +6,9 @@ Subject: Disable test failures known ups
  xarray/tests/test_backends.py | 5 ++---
  1 file changed, 2 insertions(+), 3 deletions(-)
 
-Index: python-xarray-2025.03.0/xarray/tests/test_backends.py
-===================================================================
---- python-xarray-2025.03.0.orig/xarray/tests/test_backends.py
-+++ python-xarray-2025.03.0/xarray/tests/test_backends.py
-@@ -5724,9 +5724,8 @@ def test_use_cftime_true(calendar, units
+--- a/xarray/tests/test_backends.py
++++ b/xarray/tests/test_backends.py
+@@ -6892,9 +6892,8 @@
  
  @requires_scipy_or_netCDF4
  @pytest.mark.parametrize("calendar", _STANDARD_CALENDARS)
diff -pruN 2025.03.1-8/debian/patches/disable-iris.patch 2025.10.1-1/debian/patches/disable-iris.patch
--- 2025.03.1-8/debian/patches/disable-iris.patch	2025-05-05 09:04:03.000000000 +0000
+++ 2025.10.1-1/debian/patches/disable-iris.patch	2025-10-22 11:37:31.000000000 +0000
@@ -8,14 +8,12 @@ Forwarded: not-needed
  doc/conf.py | 1 -
  1 file changed, 1 deletion(-)
 
-diff --git a/doc/conf.py b/doc/conf.py
-index e2fe5be..a038e6f 100644
 --- a/doc/conf.py
 +++ b/doc/conf.py
-@@ -342,7 +342,6 @@ intersphinx_mapping = {
+@@ -333,7 +333,6 @@
      "dask": ("https://docs.dask.org/en/latest", "/usr/share/doc/python-dask-doc/html/objects.inv"),
      "flox": ("https://flox.readthedocs.io/en/latest/", None),
-     "hypothesis": ("https://hypothesis.readthedocs.io/en/latest/", None),
+     "hypothesis": ("https://hypothesis.readthedocs.io/en/latest/", "/usr/share/doc/python-hypothesis-doc/html/objects.inv",
 -    "iris": ("https://scitools-iris.readthedocs.io/en/latest", None),
      "pandas": ("https://pandas.pydata.org/pandas-docs/stable", '/usr/share/doc/python-pandas-doc/html/objects.inv'),    
      "python": ("https://docs.python.org/3/", "/usr/share/doc/python3/html/objects.inv"),
diff -pruN 2025.03.1-8/debian/patches/disable-netcdf-compressor-tests.patch 2025.10.1-1/debian/patches/disable-netcdf-compressor-tests.patch
--- 2025.03.1-8/debian/patches/disable-netcdf-compressor-tests.patch	2025-05-05 09:04:03.000000000 +0000
+++ 2025.10.1-1/debian/patches/disable-netcdf-compressor-tests.patch	1970-01-01 00:00:00.000000000 +0000
@@ -1,29 +0,0 @@
-From: Alastair McKinstry <mckinstry@debian.org>
-Date: Sun, 21 Jan 2024 11:07:30 +0000
-Subject: NetCDF4 in Debian is not yet built with zstd or blosc compressors.
-
-Last-Updated: 2024-01-21
-Forwarded: not-needed
-
-Disable testing of these, which fail
----
- xarray/tests/test_backends.py | 6 ------
- 1 file changed, 6 deletions(-)
-
-diff --git a/xarray/tests/test_backends.py b/xarray/tests/test_backends.py
-index cfca5e6..811ae58 100644
---- a/xarray/tests/test_backends.py
-+++ b/xarray/tests/test_backends.py
-@@ -2089,12 +2089,6 @@ class TestNetCDF4Data(NetCDF4Base):
-             None,
-             "zlib",
-             "szip",
--            "zstd",
--            "blosc_lz",
--            "blosc_lz4",
--            "blosc_lz4hc",
--            "blosc_zlib",
--            "blosc_zstd",
-         ],
-     )
-     @requires_netCDF4_1_6_2_or_above
diff -pruN 2025.03.1-8/debian/patches/disable-netcdf-roundtrip-tests.patch 2025.10.1-1/debian/patches/disable-netcdf-roundtrip-tests.patch
--- 2025.03.1-8/debian/patches/disable-netcdf-roundtrip-tests.patch	2025-05-05 09:04:03.000000000 +0000
+++ 2025.10.1-1/debian/patches/disable-netcdf-roundtrip-tests.patch	2025-10-22 11:37:31.000000000 +0000
@@ -10,11 +10,9 @@ Disable testing of these for experimenta
  xarray/tests/test_backends.py | 1 +
  1 file changed, 1 insertion(+)
 
-diff --git a/xarray/tests/test_backends.py b/xarray/tests/test_backends.py
-index cf84c9e..cefe22d 100644
 --- a/xarray/tests/test_backends.py
 +++ b/xarray/tests/test_backends.py
-@@ -958,6 +958,7 @@ class CFEncodedBase(DatasetIOBase):
+@@ -1105,6 +1105,7 @@
              (create_masked_and_scaled_data, create_encoded_masked_and_scaled_data),
          ],
      )
diff -pruN 2025.03.1-8/debian/patches/hypothesis_relax 2025.10.1-1/debian/patches/hypothesis_relax
--- 2025.03.1-8/debian/patches/hypothesis_relax	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/debian/patches/hypothesis_relax	2025-10-22 11:37:31.000000000 +0000
@@ -0,0 +1,24 @@
+Author: Michael R. Crusoe <crusoe@debian.org>
+Description: Workaround for 'Input generation is slow: Hypothesis only generated 4 valid inputs after 1.55 seconds.'
+Forwarded: not-needed
+
+Due to slow test runners.
+--- python-xarray.orig/xarray/tests/test_strategies.py
++++ python-xarray/xarray/tests/test_strategies.py
+@@ -10,7 +10,7 @@
+ 
+ import hypothesis.extra.numpy as npst
+ import hypothesis.strategies as st
+-from hypothesis import given
++from hypothesis import given, settings, HealthCheck
+ from hypothesis.extra.array_api import make_strategies_namespace
+ 
+ from xarray.core.options import set_options
+@@ -28,6 +28,7 @@
+ 
+ 
+ class TestDimensionNamesStrategy:
++    @settings(suppress_health_check=[HealthCheck.too_slow])
+     @given(dimension_names())
+     def test_types(self, dims):
+         assert isinstance(dims, list)
diff -pruN 2025.03.1-8/debian/patches/import-pytest.patch 2025.10.1-1/debian/patches/import-pytest.patch
--- 2025.03.1-8/debian/patches/import-pytest.patch	2025-05-05 09:04:03.000000000 +0000
+++ 2025.10.1-1/debian/patches/import-pytest.patch	1970-01-01 00:00:00.000000000 +0000
@@ -1,48 +0,0 @@
-Description: Make suitable for execution under pytest
-  Gets discovered automatically by pytest, so fix up import
-  and markups
-Author: Alastair McKinstry <mckinstry@debian.org>
-Last-Updated: 2025-03-26
-Forwarded: no
-
-Index: python-xarray-2025.03.0/xarray/tests/test_tutorial.py
-===================================================================
---- python-xarray-2025.03.0.orig/xarray/tests/test_tutorial.py
-+++ python-xarray-2025.03.0/xarray/tests/test_tutorial.py
-@@ -3,18 +3,19 @@ from __future__ import annotations
- from xarray import DataArray, DataTree, tutorial
- from xarray.testing import assert_identical
- from xarray.tests import network
-+import pytest
- 
- 
- @network
- class TestLoadDataset:
--    @pytest.mark_xfail
-+    @pytest.mark.xfail
-     def test_download_from_github(self, tmp_path) -> None:
-         cache_dir = tmp_path / tutorial._default_cache_dir_name
-         ds = tutorial.open_dataset("tiny", cache_dir=cache_dir).load()
-         tiny = DataArray(range(5), name="tiny").to_dataset()
-         assert_identical(ds, tiny)
- 
--    @pytest.mark_xfail
-+    @pytest.mark.xfail
-     def test_download_from_github_load_without_cache(
-         self, tmp_path, monkeypatch
-     ) -> None:
-@@ -29,12 +30,14 @@ class TestLoadDataset:
- 
- @network
- class TestLoadDataTree:
-+    @pytest.mark.skip("Skipping due to download needed")
-     def test_download_from_github(self, tmp_path) -> None:
-         cache_dir = tmp_path / tutorial._default_cache_dir_name
-         ds = tutorial.open_datatree("tiny", cache_dir=cache_dir).load()
-         tiny = DataTree.from_dict({"/": DataArray(range(5), name="tiny").to_dataset()})
-         assert_identical(ds, tiny)
- 
-+    @pytest.mark.skip("Skipping due to download needed")
-     def test_download_from_github_load_without_cache(
-         self, tmp_path, monkeypatch
-     ) -> None:
diff -pruN 2025.03.1-8/debian/patches/ipython-fix.patch 2025.10.1-1/debian/patches/ipython-fix.patch
--- 2025.03.1-8/debian/patches/ipython-fix.patch	2025-05-05 09:04:03.000000000 +0000
+++ 2025.10.1-1/debian/patches/ipython-fix.patch	2025-10-22 11:37:31.000000000 +0000
@@ -11,11 +11,9 @@ missing dependencies in ipython
  doc/conf.py | 3 +++
  1 file changed, 3 insertions(+)
 
-Index: python-xarray-2025.03.0/doc/conf.py
-===================================================================
---- python-xarray-2025.03.0.orig/doc/conf.py
-+++ python-xarray-2025.03.0/doc/conf.py
-@@ -35,6 +35,9 @@ allowed_failures = set()
+--- a/doc/conf.py
++++ b/doc/conf.py
+@@ -21,6 +21,9 @@
  print("python exec:", sys.executable)
  print("sys.path:", sys.path)
  
diff -pruN 2025.03.1-8/debian/patches/no-accessors.patch 2025.10.1-1/debian/patches/no-accessors.patch
--- 2025.03.1-8/debian/patches/no-accessors.patch	2025-05-05 09:04:03.000000000 +0000
+++ 2025.10.1-1/debian/patches/no-accessors.patch	2025-10-22 11:37:31.000000000 +0000
@@ -8,20 +8,18 @@ Forwarded: not-needed
  doc/conf.py | 8 ++++----
  1 file changed, 4 insertions(+), 4 deletions(-)
 
-Index: python-xarray-2025.03.0/doc/conf.py
-===================================================================
---- python-xarray-2025.03.0.orig/doc/conf.py
-+++ python-xarray-2025.03.0/doc/conf.py
-@@ -21,7 +21,7 @@ import sys
+--- a/doc/conf.py
++++ b/doc/conf.py
+@@ -7,7 +7,7 @@
  from contextlib import suppress
  from textwrap import dedent, indent
- 
+ import packaging.version
 -import sphinx_autosummary_accessors
-+# import sphinx_autosummary_accessors
++#import sphinx_autosummary_accessors
  import yaml
  from sphinx.application import Sphinx
  from sphinx.util import logging
-@@ -76,7 +76,7 @@ nbsphinx_requirejs_path = ""
+@@ -59,14 +59,14 @@
  extensions = [
      "sphinxcontrib.mermaid",
      "sphinx.ext.autodoc",
@@ -30,21 +28,20 @@ Index: python-xarray-2025.03.0/doc/conf.
      "sphinx.ext.intersphinx",
      "sphinx.ext.extlinks",
      "sphinx.ext.mathjax",
-@@ -84,7 +84,7 @@ extensions = [
-     "IPython.sphinxext.ipython_directive",
-     "IPython.sphinxext.ipython_console_highlighting",
+     "sphinx.ext.napoleon",
+     "jupyter_sphinx",
      "nbsphinx",
 -    "sphinx_autosummary_accessors",
 +#    "sphinx_autosummary_accessors",
      "sphinx.ext.linkcode",
      "sphinxext.opengraph",
      "sphinx_copybutton",
-@@ -198,7 +198,7 @@ mathjax_path = 'file:///usr/share/javasc
+@@ -193,7 +193,7 @@
  require_js_url = 'file:////usr/share/javascript/requirejs/require.min.js'
  
  # Add any paths that contain templates here, relative to this directory.
 -templates_path = ["_templates", sphinx_autosummary_accessors.templates_path]
 +templates_path = ["_templates" ] # , sphinx_autosummary_accessors.templates_path]
  
- # The suffix of source filenames.
- # source_suffix = ".rst"
+ # The master toctree document.
+ master_doc = "index"
diff -pruN 2025.03.1-8/debian/patches/pip3.patch 2025.10.1-1/debian/patches/pip3.patch
--- 2025.03.1-8/debian/patches/pip3.patch	2025-05-05 09:04:03.000000000 +0000
+++ 2025.10.1-1/debian/patches/pip3.patch	2025-10-22 11:37:31.000000000 +0000
@@ -8,11 +8,9 @@ Forwarded: no
  xarray/util/print_versions.py | 2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)
 
-Index: python-xarray-2025.03.0/xarray/util/print_versions.py
-===================================================================
---- python-xarray-2025.03.0.orig/xarray/util/print_versions.py
-+++ python-xarray-2025.03.0/xarray/util/print_versions.py
-@@ -123,7 +123,7 @@ def show_versions(file=sys.stdout):
+--- a/xarray/util/print_versions.py
++++ b/xarray/util/print_versions.py
+@@ -122,7 +122,7 @@
          ("numpy_groupies", lambda mod: mod.__version__),
          # xarray setup/test
          ("setuptools", lambda mod: mod.__version__),
diff -pruN 2025.03.1-8/debian/patches/series 2025.10.1-1/debian/patches/series
--- 2025.03.1-8/debian/patches/series	2025-05-05 09:04:03.000000000 +0000
+++ 2025.10.1-1/debian/patches/series	2025-10-22 11:37:31.000000000 +0000
@@ -5,8 +5,7 @@ ipython-fix.patch
 xfail-on-download.patch
 pip3.patch
 no-accessors.patch
-disable-netcdf-compressor-tests.patch
 skip-tests-needing-download.patch
 disable-netcdf-roundtrip-tests.patch
 disable-cftime-false-standard-calendar-tests.patch
-import-pytest.patch
+# xfails-that-break-pytest.patch
diff -pruN 2025.03.1-8/debian/patches/skip-tests-needing-download.patch 2025.10.1-1/debian/patches/skip-tests-needing-download.patch
--- 2025.03.1-8/debian/patches/skip-tests-needing-download.patch	2025-05-05 09:04:03.000000000 +0000
+++ 2025.10.1-1/debian/patches/skip-tests-needing-download.patch	2025-10-22 11:37:31.000000000 +0000
@@ -6,19 +6,17 @@ Subject: Skip 2 tests requiring download
  xarray/tests/test_backends.py | 2 ++
  1 file changed, 2 insertions(+)
 
-diff --git a/xarray/tests/test_backends.py b/xarray/tests/test_backends.py
-index 811ae58..cf84c9e 100644
 --- a/xarray/tests/test_backends.py
 +++ b/xarray/tests/test_backends.py
-@@ -4290,6 +4290,7 @@ class TestH5NetCDFDataRos3Driver(TestCommon):
-         "https://www.unidata.ucar.edu/software/netcdf/examples/OMI-Aura_L2-example.nc"
-     )
+@@ -5254,6 +5254,7 @@
+     engine: T_NetcdfEngine = "h5netcdf"
+     test_remote_dataset: str = "https://archive.unidata.ucar.edu/software/netcdf/examples/OMI-Aura_L2-example.nc"
  
 +    @pytest.mark.skip("Skipping due to download needed")
      @pytest.mark.filterwarnings("ignore:Duplicate dimension names")
      def test_get_variable_list(self) -> None:
          with open_dataset(
-@@ -4299,6 +4300,7 @@ class TestH5NetCDFDataRos3Driver(TestCommon):
+@@ -5263,6 +5264,7 @@
          ) as actual:
              assert "Temperature" in list(actual)
  
diff -pruN 2025.03.1-8/debian/patches/xfail-on-download.patch 2025.10.1-1/debian/patches/xfail-on-download.patch
--- 2025.03.1-8/debian/patches/xfail-on-download.patch	2025-05-05 09:04:03.000000000 +0000
+++ 2025.10.1-1/debian/patches/xfail-on-download.patch	2025-10-22 11:37:31.000000000 +0000
@@ -8,22 +8,40 @@ Last-Updated: 2021-08-25
  xarray/tests/test_tutorial.py | 2 ++
  1 file changed, 2 insertions(+)
 
-Index: python-xarray-2025.03.0/xarray/tests/test_tutorial.py
-===================================================================
---- python-xarray-2025.03.0.orig/xarray/tests/test_tutorial.py
-+++ python-xarray-2025.03.0/xarray/tests/test_tutorial.py
-@@ -7,12 +7,14 @@ from xarray.tests import network
+--- python-xarray.orig/xarray/tests/test_tutorial.py
++++ python-xarray/xarray/tests/test_tutorial.py
+@@ -3,16 +3,18 @@
+ from xarray import DataArray, DataTree, tutorial
+ from xarray.testing import assert_identical
+ from xarray.tests import network
+-
++import pytest
  
  @network
  class TestLoadDataset:
-+    @pytest.mark_xfail
++    @pytest.mark.skip("Skipping due to download needed")
      def test_download_from_github(self, tmp_path) -> None:
          cache_dir = tmp_path / tutorial._default_cache_dir_name
-         ds = tutorial.open_dataset("tiny", cache_dir=cache_dir).load()
+         ds = tutorial.load_dataset("tiny", cache_dir=cache_dir)
          tiny = DataArray(range(5), name="tiny").to_dataset()
          assert_identical(ds, tiny)
  
-+    @pytest.mark_xfail
-     def test_download_from_github_load_without_cache(
-         self, tmp_path, monkeypatch
-     ) -> None:
++    @pytest.mark.skip("Skipping due to download needed")
+     def test_download_from_github_load_without_cache(self, tmp_path) -> None:
+         cache_dir = tmp_path / tutorial._default_cache_dir_name
+         ds_nocache = tutorial.load_dataset("tiny", cache=False, cache_dir=cache_dir)
+@@ -22,12 +24,14 @@
+ 
+ @network
+ class TestLoadDataTree:
++    @pytest.mark.skip("Skipping due to download needed")
+     def test_download_from_github(self, tmp_path) -> None:
+         cache_dir = tmp_path / tutorial._default_cache_dir_name
+         ds = tutorial.load_datatree("tiny", cache_dir=cache_dir)
+         tiny = DataTree.from_dict({"/": DataArray(range(5), name="tiny").to_dataset()})
+         assert_identical(ds, tiny)
+ 
++    @pytest.mark.skip("Skipping due to download needed")
+     def test_download_from_github_load_without_cache(self, tmp_path) -> None:
+         cache_dir = tmp_path / tutorial._default_cache_dir_name
+         ds_nocache = tutorial.load_datatree("tiny", cache=False, cache_dir=cache_dir)
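The refreshed patch now skips the download-dependent tutorial tests outright rather than marking them xfail. The practical difference, sketched with throwaway tests (not from the xarray suite): a skipped test is never executed, while an xfail test still runs and merely has its failure tolerated.

    import pytest

    @pytest.mark.skip("Skipping due to download needed")
    def test_skipped():
        ...  # never runs; reported as 's'

    @pytest.mark.xfail
    def test_expected_failure():
        raise RuntimeError("network unreachable")  # runs; reported as 'x' (xfail)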
diff -pruN 2025.03.1-8/debian/patches/xfails-that-break-pytest.patch 2025.10.1-1/debian/patches/xfails-that-break-pytest.patch
--- 2025.03.1-8/debian/patches/xfails-that-break-pytest.patch	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/debian/patches/xfails-that-break-pytest.patch	2025-10-22 11:37:31.000000000 +0000
@@ -0,0 +1,63 @@
+--- a/xarray/tests/test_dataarray_typing.yml
++++ b/xarray/tests/test_dataarray_typing.yml
+@@ -106,18 +106,6 @@
+     main:7: note:     def [P`2, T] pipe(self, func: Callable[[DataArray, **P], T], *args: P.args, **kwargs: P.kwargs) -> T
+     main:7: note:     def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T
+ 
+-- case: test_mypy_pipe_function_unexpected_keyword
+-  main: |
+-    from xarray import DataArray
+-
+-    def f(da: DataArray, arg: int, *, kwonly: int) -> DataArray:
+-        return da
+-
+-    # Call to pipe using wrong keyword: `kw` instead of `kwonly`
+-    da = DataArray().pipe(f, 42, kw=99)
+-  out: |
+-    main:7: error: Unexpected keyword argument "kw" for "pipe" of "DataWithCoords"  [call-arg]
+-
+ - case: test_mypy_pipe_tuple_return_type_dataarray
+   main: |
+     from xarray import DataArray
+--- a/xarray/tests/test_dataset_typing.yml
++++ b/xarray/tests/test_dataset_typing.yml
+@@ -106,18 +106,6 @@
+     main:7: note:     def [P`2, T] pipe(self, func: Callable[[Dataset, **P], T], *args: P.args, **kwargs: P.kwargs) -> T
+     main:7: note:     def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T
+ 
+-- case: test_mypy_pipe_function_unexpected_keyword
+-  main: |
+-    from xarray import Dataset
+-
+-    def f(ds: Dataset, arg: int, *, kwonly: int) -> Dataset:
+-        return ds
+-
+-    # Call to pipe using wrong keyword: `kw` instead of `kwonly`
+-    ds = Dataset().pipe(f, 42, kw=99)
+-  out: |
+-    main:7: error: Unexpected keyword argument "kw" for "pipe" of "DataWithCoords"  [call-arg]
+-
+ - case: test_mypy_pipe_tuple_return_type_dataset
+   main: |
+     from xarray import Dataset
+--- a/xarray/tests/test_datatree_typing.yml
++++ b/xarray/tests/test_datatree_typing.yml
+@@ -106,18 +106,6 @@
+     main:7: note:     def [P`2, T] pipe(self, func: Callable[[DataTree, **P], T], *args: P.args, **kwargs: P.kwargs) -> T
+     main:7: note:     def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T
+ 
+-- case: test_mypy_pipe_function_unexpected_keyword
+-  main: |
+-    from xarray import DataTree
+-
+-    def f(dt: DataTree, arg: int, *, kwonly: int) -> DataTree:
+-        return dt
+-
+-    # Call to pipe using wrong keyword: `kw` instead of `kwonly`
+-    dt = DataTree().pipe(f, 42, kw=99)
+-  out: |
+-    main:7: error: Unexpected keyword argument "kw" for "pipe" of "DataTree"  [call-arg]
+-
+ - case: test_mypy_pipe_tuple_return_type_datatree
+   main: |
+     from xarray import DataTree
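For context, the three deleted YAML cases above all exercise the same check; a roughly equivalent stand-alone snippet is shown below. It is illustrative only and assumes mypy plus xarray's inline type hints are available.

```python
# What the dropped pytest-mypy-plugins cases verify: running mypy over this
# snippet should report
#   error: Unexpected keyword argument "kw" for "pipe" of "DataWithCoords"  [call-arg]
from xarray import DataArray


def f(da: DataArray, arg: int, *, kwonly: int) -> DataArray:
    return da


# Wrong keyword: `kw` instead of `kwonly`
da = DataArray().pipe(f, 42, kw=99)
```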
diff -pruN 2025.03.1-8/debian/rules 2025.10.1-1/debian/rules
--- 2025.03.1-8/debian/rules	2025-05-05 09:04:03.000000000 +0000
+++ 2025.10.1-1/debian/rules	2025-10-22 11:37:31.000000000 +0000
@@ -3,6 +3,8 @@
 # Uncomment this to turn on verbose mode.
 export DH_VERBOSE=1
 
+include /usr/share/dpkg/buildopts.mk
+
 # Path to tests differs for pybuild and autopkgtest environments
 # Architecture determination also differs.
 ifeq ($(PYBUILD_AUTOPKGTEST),1)
@@ -19,7 +21,7 @@ PY3VERS:= $(shell py3versions -s)
 
 # Disable for this release:
 # - tests need pytest < 8
-# DEB_BUILD_OPTIONS += nodoc
+DEB_BUILD_OPTIONS += nodoc
 
 export PYDEVD_DISABLE_FILE_VALIDATION=1
 
@@ -32,11 +34,12 @@ export HOME=$(CURDIR)/debian/build
 # Stop parallel tests, broken for some reason?
 # export PYBUILD_TEST_ARGS=-r fEs -n auto -k 'not (test_weighted_operations_keep_attr or test_reduce_keepdims)' --pyargs xarray
 
-export PYBUILD_TEST_ARGS := --deselect=$(TESTPATH)/test_weighted.py::test_weighted_operations_keep_attr --deselect=$(TESTPATH)/test_variable.py::TestVariable::test_reduce_keepdims --deselect=$(TESTPATH)/test_backends.py::test_zarr_region_chunk_partial_offset --deselect=$(TESTPATH)/test_backends.py::test_chunk_encoding_with_dask --pyargs xarray
+export PYBUILD_TEST_ARGS := $(DEB_BUILD_OPTION_PARALLEL:%=-n%) --deselect=$(TESTPATH)/test_weighted.py::test_weighted_operations_keep_attr --deselect=$(TESTPATH)/test_variable.py::TestVariable::test_reduce_keepdims --deselect=$(TESTPATH)/test_backends.py::test_zarr_region_chunk_partial_offset --deselect=$(TESTPATH)/test_backends.py::test_chunk_encoding_with_dask -k 'not test_mypy' --pyargs xarray
 
 # Some tests fail on 32-bit archs; skip those tests
 BIT32_ARCH := i386 armel armhf
 ifneq (,$(filter $(TESTARCH), $(BIT32_ARCH)))
+export PYBUILD_DISABLE_python3=test
 export PYBUILD_TEST_ARGS := --deselect=$(TESTPATH)/test_cftimeindex.py::test_asi8 --deselect=$(TESTPATH)/test_datatree.py::TestRepr::test_doc_example $(PYBUILD_TEST_ARGS) --deselect=$(TESTPATH)/test_strategies.py::TestReduction::test_mean
 endif
 
@@ -55,7 +58,7 @@ endif
 
 execute_after_dh_auto_install: $(PYTHON3:%=install-python%)
 	find debian/python3-xarray -name '*.idx' -exec chmod -x {} \;
-	find debian -name  test.nc -delete 
+	find debian -name  test.nc -delete
 
 override_dh_auto_build:
 	http_proxy=127.0.0.1:9 https_proxy=127.0.0.1:9 dh_auto_build
@@ -65,6 +68,8 @@ endif
 
 ifeq (,$(filter nodoc,$(DEB_BUILD_OPTIONS)))
 override_dh_sphinxdoc:
+	ln -s /usr/lib/python3/dist-packages/jupyter_sphinx/thebelab/thebelab-helper.js \
+		debian/python-xarray-doc/usr/share/doc/python-xarray-doc/html/_static
 	dh_sphinxdoc --exclude=MathJax.js
 	find debian/python-xarray-doc -name '*.html' \
 	 -exec grep require.js {} /dev/null \; | cut -f1 -d: | while read r; do \
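Rough sketch (assumed values) of the pytest invocation that the updated PYBUILD_TEST_ARGS produces: `$(DEB_BUILD_OPTION_PARALLEL:%=-n%)` turns e.g. `parallel=4` from DEB_BUILD_OPTIONS into pytest-xdist's `-n4`, and the new `-k 'not test_mypy'` filter complements xfails-that-break-pytest.patch above. The paths and parallel level below are illustrative, not the literal generated command.

```python
# Approximate equivalent of the generated test command (values assumed):
import pytest

args = [
    "-n4",  # from DEB_BUILD_OPTION_PARALLEL (e.g. DEB_BUILD_OPTIONS=parallel=4)
    "--deselect=xarray/tests/test_weighted.py::test_weighted_operations_keep_attr",
    # ... further --deselect options as listed in PYBUILD_TEST_ARGS ...
    "-k", "not test_mypy",  # exclude the mypy-based typing tests
    "--pyargs", "xarray",
]
raise SystemExit(pytest.main(args))
```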
diff -pruN 2025.03.1-8/debian/tests/autopkgtest-pkg-pybuild.conf 2025.10.1-1/debian/tests/autopkgtest-pkg-pybuild.conf
--- 2025.03.1-8/debian/tests/autopkgtest-pkg-pybuild.conf	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/debian/tests/autopkgtest-pkg-pybuild.conf	2025-10-22 11:37:31.000000000 +0000
@@ -0,0 +1 @@
+architecture=!i386 !armhf
diff -pruN 2025.03.1-8/debian/upstream/metadata 2025.10.1-1/debian/upstream/metadata
--- 2025.03.1-8/debian/upstream/metadata	2025-05-05 09:04:03.000000000 +0000
+++ 2025.10.1-1/debian/upstream/metadata	2025-10-22 11:37:31.000000000 +0000
@@ -1,4 +1,6 @@
 ---
 Bug-Database: https://github.com/pydata/xarray/issues
 Bug-Submit: https://github.com/pydata/xarray/issues/new
+Documentation: https://docs.xarray.dev
+Repository: https://github.com/pydata/xarray.git
 Repository-Browse: https://github.com/pydata/xarray
diff -pruN 2025.03.1-8/design_notes/flexible_indexes_notes.md 2025.10.1-1/design_notes/flexible_indexes_notes.md
--- 2025.03.1-8/design_notes/flexible_indexes_notes.md	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/design_notes/flexible_indexes_notes.md	2025-10-10 10:38:05.000000000 +0000
@@ -97,12 +97,12 @@ The new `indexes` argument of Dataset/Da
 ```python
 >>> da = xr.DataArray(
 ...     data=[[275.2, 273.5], [270.8, 278.6]],
-...     dims=('x', 'y'),
+...     dims=("x", "y"),
 ...     coords={
-...         'lat': (('x', 'y'), [[45.6, 46.5], [50.2, 51.6]]),
-...         'lon': (('x', 'y'), [[5.7, 10.5], [6.2, 12.8]]),
+...         "lat": (("x", "y"), [[45.6, 46.5], [50.2, 51.6]]),
+...         "lon": (("x", "y"), [[5.7, 10.5], [6.2, 12.8]]),
 ...     },
-...     indexes={('lat', 'lon'): SpatialIndex},
+...     indexes={("lat", "lon"): SpatialIndex},
 ... )
 <xarray.DataArray (x: 2, y: 2)>
 array([[275.2, 273.5],
@@ -120,7 +120,7 @@ More formally, `indexes` would accept `M
 Currently index objects like `pandas.MultiIndex` can be passed directly to `coords`, which in this specific case results in the implicit creation of virtual coordinates. With the new `indexes` argument this behavior may become even more confusing than it currently is. For the sake of clarity, it would be appropriate to eventually drop support for this specific behavior and treat any given mapping value given in `coords` as an array that can be wrapped into an Xarray variable, i.e., in the case of a multi-index:
 
 ```python
->>> xr.DataArray([1.0, 2.0], dims='x', coords={'x': midx})
+>>> xr.DataArray([1.0, 2.0], dims="x", coords={"x": midx})
 <xarray.DataArray (x: 2)>
 array([1., 2.])
 Coordinates:
@@ -166,11 +166,11 @@ Besides `pandas.Index`, other indexes cu
 
 Like for the indexes, explicit coordinate creation should be preferred over implicit coordinate creation. However, there may be some situations where we would like to keep creating coordinates implicitly for backwards compatibility.
 
-For example, it is currently possible to pass a `pandas.MulitIndex` object as a coordinate to the Dataset/DataArray constructor:
+For example, it is currently possible to pass a `pandas.MultiIndex` object as a coordinate to the Dataset/DataArray constructor:
 
 ```python
->>> midx = pd.MultiIndex.from_arrays([['a', 'b'], [0, 1]], names=['lvl1', 'lvl2'])
->>> da = xr.DataArray([1.0, 2.0], dims='x', coords={'x': midx})
+>>> midx = pd.MultiIndex.from_arrays([["a", "b"], [0, 1]], names=["lvl1", "lvl2"])
+>>> da = xr.DataArray([1.0, 2.0], dims="x", coords={"x": midx})
 >>> da
 <xarray.DataArray (x: 2)>
 array([1., 2.])
@@ -201,7 +201,9 @@ Besides `pandas.MultiIndex`, there may b
 The example given here is quite confusing, though: this is not an easily predictable behavior. We could entirely avoid the implicit creation of coordinates, e.g., using a helper function that generates coordinate + index dictionaries that we could then pass directly to the DataArray/Dataset constructor:
 
 ```python
->>> coords_dict, index_dict = create_coords_from_index(midx, dims='x', include_dim_coord=True)
+>>> coords_dict, index_dict = create_coords_from_index(
+...     midx, dims="x", include_dim_coord=True
+... )
 >>> coords_dict
 {'x': <xarray.Variable (x: 2)>
  array([('a', 0), ('b', 1)], dtype=object),
@@ -211,7 +213,7 @@ The example given here is quite confusin
  array([0, 1])}
 >>> index_dict
 {('lvl1', 'lvl2'): midx}
->>> xr.DataArray([1.0, 2.0], dims='x', coords=coords_dict, indexes=index_dict)
+>>> xr.DataArray([1.0, 2.0], dims="x", coords=coords_dict, indexes=index_dict)
 <xarray.DataArray (x: 2)>
 array([1., 2.])
 Coordinates:
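As a stand-alone illustration of the explicit alternative sketched in the note above, the multi-index coordinates can be built explicitly and passed to the constructor. This uses API available in current xarray (`Coordinates.from_pandas_multiindex`), not the proposed `indexes=` argument.

```python
# Explicit multi-index coordinate creation, as opposed to the implicit
# expansion discussed in the design note.
import pandas as pd
import xarray as xr

midx = pd.MultiIndex.from_arrays([["a", "b"], [0, 1]], names=["lvl1", "lvl2"])
coords = xr.Coordinates.from_pandas_multiindex(midx, "x")  # dim name "x"
da = xr.DataArray([1.0, 2.0], dims="x", coords=coords)
print(da.coords)  # "x" plus the "lvl1" and "lvl2" level coordinates
```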
diff -pruN 2025.03.1-8/design_notes/grouper_objects.md 2025.10.1-1/design_notes/grouper_objects.md
--- 2025.03.1-8/design_notes/grouper_objects.md	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/design_notes/grouper_objects.md	2025-10-10 10:38:05.000000000 +0000
@@ -8,7 +8,7 @@
 I propose the addition of Grouper objects to Xarray's public API so that
 
 ```python
-Dataset.groupby(x=BinGrouper(bins=np.arange(10, 2))))
+Dataset.groupby(x=BinGrouper(bins=np.arange(10, 2)))
 ```
 
 is identical to today's syntax:
@@ -27,7 +27,7 @@ results = []
 for element in unique_labels:
     subset = ds.sel(x=(ds.x == element))  # split
     # subset = ds.where(ds.x == element, drop=True)  # alternative
-    result = subset.mean() # apply
+    result = subset.mean()  # apply
     results.append(result)
 
 xr.concat(results)  # combine
@@ -36,7 +36,7 @@ xr.concat(results)  # combine
 to
 
 ```python
-ds.groupby('x').mean()  # splits, applies, and combines
+ds.groupby("x").mean()  # splits, applies, and combines
 ```
 
 Efficient vectorized implementations of this pattern are implemented in numpy's [`ufunc.at`](https://numpy.org/doc/stable/reference/generated/numpy.ufunc.at.html), [`ufunc.reduceat`](https://numpy.org/doc/stable/reference/generated/numpy.ufunc.reduceat.html), [`numbagg.grouped`](https://github.com/numbagg/numbagg/blob/main/numbagg/grouped.py), [`numpy_groupies`](https://github.com/ml31415/numpy-groupies), and probably more.
@@ -110,11 +110,13 @@ All Grouper objects will subclass from a
 ```python
 import abc
 
+
 class Grouper(abc.ABC):
     @abc.abstractmethod
     def factorize(self, by: DataArray):
         raise NotImplementedError
 
+
 class CustomGrouper(Grouper):
     def factorize(self, by: DataArray):
         ...
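The proposed syntax has since landed as `xarray.groupers` in recent releases; a short usage sketch with toy data (assuming the current public API) follows.

```python
# Grouping with an explicit Grouper object instead of groupby_bins.
import numpy as np
import xarray as xr
from xarray.groupers import BinGrouper

ds = xr.Dataset({"t": ("x", np.arange(10.0))}, coords={"x": np.arange(10)})
binned = ds.groupby(x=BinGrouper(bins=np.arange(0, 11, 2))).mean()
print(binned)  # mean of "t" over each bin of the "x" coordinate
```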
diff -pruN 2025.03.1-8/design_notes/named_array_design_doc.md 2025.10.1-1/design_notes/named_array_design_doc.md
--- 2025.03.1-8/design_notes/named_array_design_doc.md	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/design_notes/named_array_design_doc.md	2025-10-10 10:38:05.000000000 +0000
@@ -75,7 +75,6 @@ The named-array package is designed to b
      - Delete the ExplicitIndexer objects (`BasicIndexer`, `VectorizedIndexer`, `OuterIndexer`)
      - Remove explicit support for `pd.Index`. When provided with a `pd.Index` object, Variable will coerce to an array using `np.array(pd.Index)`. For Xarray's purposes, Xarray can use `as_variable` to explicitly wrap these in PandasIndexingAdapter and pass them to `Variable.__init__`.
 3. Define a minimal variable interface that the rest of Xarray can use:
-
    1. `dims`: tuple of dimension names
    2. `data`: numpy/dask/duck arrays`
    3. `attrs``: dictionary of attributes
@@ -167,8 +166,8 @@ We plan to publicize this document on :
 
 - [x] `Xarray dev call`
 - [ ] `Scientific Python discourse`
-- [ ] `Xarray Github`
-- [ ] `Twitter`
+- [ ] `Xarray GitHub`
+- [ ] `Twitter (X)`
 - [ ] `Respond to NamedTensor and Scikit-Learn issues?`
 - [ ] `Pangeo Discourse`
 - [ ] `Numpy, SciPy email lists?`
@@ -194,134 +193,132 @@ Questions:
 
 ```python
 # Sorting
-   Variable.argsort
-   Variable.searchsorted
+Variable.argsort
+Variable.searchsorted
 
 # NaN handling
-   Variable.fillna
-   Variable.isnull
-   Variable.notnull
+Variable.fillna
+Variable.isnull
+Variable.notnull
 
 # Lazy data handling
-   Variable.chunk # Could instead have accessor interface and recommend users use `Variable.dask.chunk` and `Variable.cubed.chunk`?
-   Variable.to_numpy()
-   Variable.as_numpy()
+Variable.chunk  # Could instead have accessor interface and recommend users use `Variable.dask.chunk` and `Variable.cubed.chunk`?
+Variable.to_numpy()
+Variable.as_numpy()
 
 # Xarray-specific
-   Variable.get_axis_num
-   Variable.isel
-   Variable.to_dict
+Variable.get_axis_num
+Variable.isel
+Variable.to_dict
 
 # Reductions
-   Variable.reduce
-   Variable.all
-   Variable.any
-   Variable.argmax
-   Variable.argmin
-   Variable.count
-   Variable.max
-   Variable.mean
-   Variable.median
-   Variable.min
-   Variable.prod
-   Variable.quantile
-   Variable.std
-   Variable.sum
-   Variable.var
+Variable.reduce
+Variable.all
+Variable.any
+Variable.argmax
+Variable.argmin
+Variable.count
+Variable.max
+Variable.mean
+Variable.median
+Variable.min
+Variable.prod
+Variable.quantile
+Variable.std
+Variable.sum
+Variable.var
 
 # Accumulate
-   Variable.cumprod
-   Variable.cumsum
+Variable.cumprod
+Variable.cumsum
 
 # numpy-like Methods
-   Variable.astype
-   Variable.copy
-   Variable.clip
-   Variable.round
-   Variable.item
-   Variable.where
+Variable.astype
+Variable.copy
+Variable.clip
+Variable.round
+Variable.item
+Variable.where
 
 # Reordering/Reshaping
-   Variable.squeeze
-   Variable.pad
-   Variable.roll
-   Variable.shift
-
+Variable.squeeze
+Variable.pad
+Variable.roll
+Variable.shift
 ```
 
 #### methods to be renamed from xarray.Variable
 
 ```python
 # Xarray-specific
-   Variable.concat # create two functions, one as the equivalent of `np.stack` and other for `np.concat`
+Variable.concat  # create two functions, one as the equivalent of `np.stack` and other for `np.concat`
 
-   # Given how niche these are, these would be better as functions than methods.
-   # We could also keep these in Xarray, at least for now. If we don't think people will use functionality outside of Xarray it probably is not worth the trouble of porting it (including documentation, etc).
-   Variable.coarsen # This should probably be called something like coarsen_reduce.
-   Variable.coarsen_reshape
-   Variable.rolling_window
+# Given how niche these are, these would be better as functions than methods.
+# We could also keep these in Xarray, at least for now. If we don't think people will use functionality outside of Xarray it probably is not worth the trouble of porting it (including documentation, etc).
+Variable.coarsen  # This should probably be called something like coarsen_reduce.
+Variable.coarsen_reshape
+Variable.rolling_window
 
-   Variable.set_dims # split this into broadcast_to and expand_dims
+Variable.set_dims  # split this into broadcast_to and expand_dims
 
 
 # Reordering/Reshaping
-   Variable.stack # To avoid confusion with np.stack, let's call this stack_dims.
-   Variable.transpose # Could consider calling this permute_dims, like the [array API standard](https://data-apis.org/array-api/2022.12/API_specification/manipulation_functions.html#objects-in-api)
-   Variable.unstack # Likewise, maybe call this unstack_dims?
+Variable.stack  # To avoid confusion with np.stack, let's call this stack_dims.
+Variable.transpose  # Could consider calling this permute_dims, like the [array API standard](https://data-apis.org/array-api/2022.12/API_specification/manipulation_functions.html#objects-in-api)
+Variable.unstack  # Likewise, maybe call this unstack_dims?
 ```
 
 #### methods to be removed from xarray.Variable
 
 ```python
 # Testing
-   Variable.broadcast_equals
-   Variable.equals
-   Variable.identical
-   Variable.no_conflicts
+Variable.broadcast_equals
+Variable.equals
+Variable.identical
+Variable.no_conflicts
 
 # Lazy data handling
-   Variable.compute # We can probably omit this method for now, too, given that dask.compute() uses a protocol. The other concern is that different array libraries have different notions of "compute" and this one is rather Dask specific, including conversion from Dask to NumPy arrays. For example, in JAX every operation executes eagerly, but in a non-blocking fashion, and you need to call jax.block_until_ready() to ensure computation is finished.
-   Variable.load # Could remove? compute vs load is a common source of confusion.
+Variable.compute  # We can probably omit this method for now, too, given that dask.compute() uses a protocol. The other concern is that different array libraries have different notions of "compute" and this one is rather Dask specific, including conversion from Dask to NumPy arrays. For example, in JAX every operation executes eagerly, but in a non-blocking fashion, and you need to call jax.block_until_ready() to ensure computation is finished.
+Variable.load  # Could remove? compute vs load is a common source of confusion.
 
 # Xarray-specific
-   Variable.to_index
-   Variable.to_index_variable
-   Variable.to_variable
-   Variable.to_base_variable
-   Variable.to_coord
+Variable.to_index
+Variable.to_index_variable
+Variable.to_variable
+Variable.to_base_variable
+Variable.to_coord
 
-   Variable.rank # Uses bottleneck. Delete? Could use https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rankdata.html instead
+Variable.rank  # Uses bottleneck. Delete? Could use https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rankdata.html instead
 
 
 # numpy-like Methods
-   Variable.conjugate # .conj is enough
-   Variable.__array_wrap__ # This is a very old NumPy protocol for duck arrays. We don't need it now that we have `__array_ufunc__` and `__array_function__`
+Variable.conjugate  # .conj is enough
+Variable.__array_wrap__  # This is a very old NumPy protocol for duck arrays. We don't need it now that we have `__array_ufunc__` and `__array_function__`
 
 # Encoding
-    Variable.reset_encoding
-
+Variable.reset_encoding
 ```
 
 #### Attributes to be preserved from xarray.Variable
 
 ```python
 # Properties
-   Variable.attrs
-   Variable.chunks
-   Variable.data
-   Variable.dims
-   Variable.dtype
-
-   Variable.nbytes
-   Variable.ndim
-   Variable.shape
-   Variable.size
-   Variable.sizes
-
-   Variable.T
-   Variable.real
-   Variable.imag
-   Variable.conj
+Variable.attrs
+Variable.chunks
+Variable.data
+Variable.dims
+Variable.dtype
+
+Variable.nbytes
+Variable.ndim
+Variable.shape
+Variable.size
+Variable.sizes
+
+Variable.T
+Variable.real
+Variable.imag
+Variable.conj
 ```
 
 #### Attributes to be renamed from xarray.Variable
@@ -333,12 +330,10 @@ Questions:
 #### Attributes to be removed from xarray.Variable
 
 ```python
-
-   Variable.values # Probably also remove -- this is a legacy from before Xarray supported dask arrays. ".data" is enough.
+Variable.values  # Probably also remove -- this is a legacy from before Xarray supported dask arrays. ".data" is enough.
 
 # Encoding
-   Variable.encoding
-
+Variable.encoding
 ```
 
 ### Appendix: Implementation Details
@@ -347,17 +342,16 @@ Questions:
 
 ```python
 class VariableArithmetic(
- ImplementsArrayReduce,
- IncludeReduceMethods,
- IncludeCumMethods,
- IncludeNumpySameMethods,
- SupportsArithmetic,
- VariableOpsMixin,
+    ImplementsArrayReduce,
+    IncludeReduceMethods,
+    IncludeCumMethods,
+    IncludeNumpySameMethods,
+    SupportsArithmetic,
+    VariableOpsMixin,
 ):
- __slots__ = ()
- # prioritize our operations over those of numpy.ndarray (priority=0)
- __array_priority__ = 50
-
+    __slots__ = ()
+    # prioritize our operations over those of numpy.ndarray (priority=0)
+    __array_priority__ = 50
 ```
 
 - Move over `_typed_ops.VariableOpsMixin`
@@ -369,7 +363,6 @@ class VariableArithmetic(
 - The Variable constructor will need to be rewritten to no longer accept tuples, encodings, etc. These details should be handled at the Xarray data structure level.
 - What happens to `duck_array_ops?`
 - What about Variable.chunk and "chunk managers"?
-
   - Could this functionality be left in Xarray proper for now? Alternative array types like JAX also have some notion of "chunks" for parallel arrays, but the details differ in a number of ways from the Dask/Cubed.
   - Perhaps variable.chunk/load methods should become functions defined in xarray that convert Variable objects. This is easy so long as xarray can reach in and replace .data
 
diff -pruN 2025.03.1-8/doc/_static/style.css 2025.10.1-1/doc/_static/style.css
--- 2025.03.1-8/doc/_static/style.css	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/_static/style.css	2025-10-10 10:38:05.000000000 +0000
@@ -1,273 +1,54 @@
-table.colwidths-given {
-  table-layout: fixed;
-  width: 100%;
-}
-table.docutils td {
-  white-space: unset;
-  word-wrap: break-word;
-}
-
-.bd-header-announcement {
-  background-color: var(--pst-color-info-bg);
-}
-
-/* Reduce left and right margins */
-
-.container,
-.container-lg,
-.container-md,
-.container-sm,
-.container-xl {
-  max-width: 1350px !important;
-}
+/* Override some aspects of the pydata-sphinx-theme */
 
-/* Copied from
-https://github.com/bokeh/bokeh/blob/branch-2.4/sphinx/source/bokeh/static/custom.css
+/* Xarray Branding Guide:
+Primary Color palette (Hex): #17afb4 #e28126 #59c7d6 #0e4666 #4a4a4a
+Secondary Color Palette (Hex): #f58154 #e7b72d #b3dfe5 #8e8d99 #767985
+Primary Typeface: Acumin Variable Concept - Semicondensed Medium
 */
 
-:root {
-  /* Logo image height + all the paddings/margins make the navbar height. */
-  --navbar-height: calc(30px + 0.3125rem * 2 + 0.5rem * 2);
-}
-
-.bd-search {
-  position: relative;
-  padding-bottom: 20px;
-}
-
-@media (min-width: 768px) {
-  .search-front-page {
-    width: 50%;
-  }
-}
-
-/* minimal copy paste from bootstrap docs css to get sidebars working */
-
-.bd-toc {
-  -ms-flex-order: 2;
-  order: 2;
-  padding-top: 1.5rem;
-  padding-bottom: 1.5rem;
-  /* font-size: 0.875rem; */
-  /* add scrolling sidebar */
-  height: calc(100vh - 2rem);
-  overflow-y: auto;
-}
-
-@supports ((position: -webkit-sticky) or (position: sticky)) {
-  .bd-toc {
-    position: -webkit-sticky;
-    position: sticky;
-    top: 4rem;
-    height: calc(100vh - 4rem);
-    overflow-y: auto;
-  }
-}
-
-.section-nav {
-  padding-left: 0;
-  border-left: 1px solid #eee;
-  border-bottom: none;
-}
-
-.section-nav ul {
-  padding-left: 1rem;
-}
-
-.toc-entry {
-  display: block;
-}
-
-.toc-entry a {
-  display: block;
-  padding: 0.125rem 1.5rem;
-  color: #77757a;
-}
-
-.toc-entry a:hover {
-  color: rgba(0, 0, 0, 0.85);
-  text-decoration: none;
-}
-
-.bd-sidebar {
-  -ms-flex-order: 0;
-  order: 0;
-  border-bottom: 1px solid rgba(0, 0, 0, 0.1);
-}
-
-@media (min-width: 768px) {
-  .bd-sidebar {
-    border-right: 1px solid rgba(0, 0, 0, 0.1);
-  }
-  @supports ((position: -webkit-sticky) or (position: sticky)) {
-    .bd-sidebar {
-      position: -webkit-sticky;
-      position: sticky;
-      top: var(--navbar-height);
-      z-index: 1000;
-      height: calc(100vh - var(--navbar-height));
-    }
-  }
-}
-
-@media (min-width: 1200px) {
-  .bd-sidebar {
-    -ms-flex: 0 1 320px;
-    flex: 0 1 320px;
-  }
-}
-
-.bd-links {
-  padding-top: 1rem;
-  padding-bottom: 1rem;
-  margin-right: -15px;
-  margin-left: -15px;
-}
-
-@media (min-width: 768px) {
-  @supports ((position: -webkit-sticky) or (position: sticky)) {
-    .bd-links {
-      max-height: calc(100vh - 9rem);
-      overflow-y: auto;
-    }
-  }
-}
-
-@media (min-width: 768px) {
-  .bd-links {
-    display: block !important;
-  }
-}
-
-.bd-sidenav {
-  display: none;
-}
-
-.bd-toc-link {
-  display: block;
-  padding: 0.25rem 1.5rem;
-  font-weight: 400;
-  color: rgba(0, 0, 0, 0.65);
-}
-
-.bd-toc-link:hover {
-  color: rgba(0, 0, 0, 0.85);
-  text-decoration: none;
-}
-
-.bd-toc-item.active {
-  margin-bottom: 1rem;
-}
-
-.bd-toc-item.active:not(:first-child) {
-  margin-top: 1rem;
-}
-
-.bd-toc-item.active > .bd-toc-link {
-  color: rgba(0, 0, 0, 0.85);
-}
-
-.bd-toc-item.active > .bd-toc-link:hover {
-  background-color: transparent;
-}
-
-.bd-toc-item.active > .bd-sidenav {
-  display: block;
-}
-
-.bd-sidebar .nav > li > a {
-  display: block;
-  padding: 0.25rem 1.5rem;
-  font-size: 90%;
-}
-
-.bd-sidebar .nav > li > a:hover {
-  text-decoration: none;
-  background-color: transparent;
-}
-
-.bd-sidebar .nav > .active > a,
-.bd-sidebar .nav > .active:hover > a {
-  font-weight: 400;
-  /* adjusted from original
-  color: rgba(0, 0, 0, 0.85);
-  background-color: transparent; */
-}
-
-.bd-sidebar .nav > li > ul {
-  list-style: none;
-  padding: 0.25rem 1.5rem;
-}
-
-.bd-sidebar .nav > li > ul > li > a {
-  display: block;
-  padding: 0.25rem 1.5rem;
-  font-size: 90%;
-}
-
-.bd-sidebar .nav > li > ul > .active > a,
-.bd-sidebar .nav > li > ul > .active:hover > a {
-  font-weight: 400;
-}
-
-dt:target {
-  background-color: initial;
-}
-
-/* Offsetting anchored elements within the main content to adjust for fixed header
- https://github.com/pandas-dev/pandas-sphinx-theme/issues/6 */
-main *:target::before {
-  display: block;
-  content: "";
-  height: var(--navbar-height);
-  margin-top: calc(-1 * var(--navbar-height));
-}
-
-body {
-  width: 100%;
-}
-
-/* adjust toc font sizes to improve overview */
-.toc-h2 {
-  font-size: 0.85rem;
-}
-
-.toc-h3 {
-  font-size: 0.75rem;
-}
-
-.toc-h4 {
-  font-size: 0.65rem;
-}
-
-.toc-entry > .nav-link.active {
-  font-weight: 400;
-  color: #542437;
-  background-color: transparent;
-  border-left: 2px solid #563d7c;
-}
-
-.nav-link:hover {
-  border-style: none;
-}
-
-/* Collapsing of the TOC sidebar while scrolling */
-
-/* Nav: hide second level (shown on .active) */
-.bd-toc .nav .nav {
-  display: none;
-}
-
-.bd-toc .nav > .active > ul {
-  display: block;
-}
-
-/* Main index page overview cards */
-
-.sd-card-img-top {
-  width: 33% !important;
-  display: block;
-  margin-left: auto;
-  margin-right: auto;
-  margin-top: 10px;
+/* Increase Xarray logo size in upper left corner */
+.navbar-brand img {
+  height: 75px;
+}
+.navbar-brand {
+  height: 75px;
+}
+
+/* Adjust index page overview cards, borrowed from Pandas & Numpy */
+/* Override SVG icon color */
+html[data-theme="dark"] .sd-card img[src*=".svg"] {
+  filter: invert(0.82) brightness(0.8) contrast(1.2);
+}
+/* https://github.com/executablebooks/sphinx-design/blob/main/style/_cards.scss */
+/* More space around image */
+.intro-card {
+  padding: 30px 1px 1px 1px;
+}
+/* More prominent card borders */
+.intro-card .sd-card {
+  border: 2px solid var(--pst-color-border);
+  overflow: hidden;
+}
+/* Shrink SVG icons */
+.intro-card .sd-card-img-top {
+  margin: 1px;
+  height: 100px;
+  background-color: transparent !important;
+}
+/* Color titles like links */
+.intro-card .sd-card-title {
+  color: var(--pst-color-primary);
+  font-size: var(--pst-font-size-h5);
+}
+/* Don't have 'raised' color background for card interiors in dark mode */
+.bd-content .sd-card .sd-card-body {
+  background-color: unset !important;
+}
+
+/* workaround Pydata Sphinx theme using light colors for widget cell outputs in dark-mode */
+/* works for many widgets but not for Xarray html reprs */
+/* https://github.com/pydata/pydata-sphinx-theme/issues/2189 */
+html[data-theme="dark"] div.cell_output .text_html:has(div.xr-wrap) {
+  background-color: var(--pst-color-on-background) !important;
+  color: var(--pst-color-text-base) !important;
 }
diff -pruN 2025.03.1-8/doc/api/accessors.rst 2025.10.1-1/doc/api/accessors.rst
--- 2025.03.1-8/doc/api/accessors.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/api/accessors.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,13 @@
+.. currentmodule:: xarray
+
+Accessors
+=========
+
+.. currentmodule:: xarray.core
+
+.. autosummary::
+   :toctree: ../generated/
+
+   accessor_dt.DatetimeAccessor
+   accessor_dt.TimedeltaAccessor
+   accessor_str.StringAccessor
diff -pruN 2025.03.1-8/doc/api/advanced.rst 2025.10.1-1/doc/api/advanced.rst
--- 2025.03.1-8/doc/api/advanced.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/api/advanced.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,26 @@
+.. currentmodule:: xarray
+
+Advanced API
+============
+
+The methods and properties here are advanced API and not recommended for use unless you know what you are doing.
+
+.. autosummary::
+   :toctree: ../generated/
+
+   Dataset.variables
+   DataArray.variable
+   DataTree.variables
+   Variable
+   IndexVariable
+   as_variable
+   Context
+   register_dataset_accessor
+   register_dataarray_accessor
+   register_datatree_accessor
+   Dataset.set_close
+
+.. ..
+
+..    Missing:
+..    ``DataTree.set_close``
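A minimal sketch of the accessor-registration API listed above; the accessor name `center` and its behaviour are invented for illustration.

```python
import xarray as xr


@xr.register_dataset_accessor("center")  # the name "center" is made up
class CenterAccessor:
    def __init__(self, ds: xr.Dataset) -> None:
        self._ds = ds

    def __call__(self) -> xr.Dataset:
        # Subtract each variable's mean, i.e. "center" the dataset.
        return self._ds - self._ds.mean()


ds = xr.Dataset({"a": ("x", [1.0, 2.0, 3.0])})
print(ds.center())
```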
diff -pruN 2025.03.1-8/doc/api/backends.rst 2025.10.1-1/doc/api/backends.rst
--- 2025.03.1-8/doc/api/backends.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/api/backends.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,41 @@
+.. currentmodule:: xarray
+
+Backends
+========
+
+.. autosummary::
+   :toctree: ../generated/
+
+   backends.BackendArray
+   backends.BackendEntrypoint
+   backends.list_engines
+   backends.refresh_engines
+
+These backends provide a low-level interface for lazily loading data from
+external file-formats or protocols, and can be manually invoked to create
+arguments for the ``load_store`` and ``dump_to_store`` Dataset methods:
+
+.. autosummary::
+   :toctree: ../generated/
+
+   backends.NetCDF4DataStore
+   backends.H5NetCDFStore
+   backends.PydapDataStore
+   backends.ScipyDataStore
+   backends.ZarrStore
+   backends.FileManager
+   backends.CachingFileManager
+   backends.DummyFileManager
+
+These BackendEntrypoints provide a basic interface to the most commonly
+used filetypes in the xarray universe.
+
+.. autosummary::
+   :toctree: ../generated/
+
+   backends.NetCDF4BackendEntrypoint
+   backends.H5netcdfBackendEntrypoint
+   backends.PydapBackendEntrypoint
+   backends.ScipyBackendEntrypoint
+   backends.StoreBackendEntrypoint
+   backends.ZarrBackendEntrypoint
diff -pruN 2025.03.1-8/doc/api/coarsen.rst 2025.10.1-1/doc/api/coarsen.rst
--- 2025.03.1-8/doc/api/coarsen.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/api/coarsen.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,48 @@
+.. currentmodule:: xarray
+
+Coarsen objects
+===============
+
+.. currentmodule:: xarray.computation.rolling
+
+Dataset
+-------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DatasetCoarsen
+   DatasetCoarsen.all
+   DatasetCoarsen.any
+   DatasetCoarsen.construct
+   DatasetCoarsen.count
+   DatasetCoarsen.max
+   DatasetCoarsen.mean
+   DatasetCoarsen.median
+   DatasetCoarsen.min
+   DatasetCoarsen.prod
+   DatasetCoarsen.reduce
+   DatasetCoarsen.std
+   DatasetCoarsen.sum
+   DatasetCoarsen.var
+
+DataArray
+---------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DataArrayCoarsen
+   DataArrayCoarsen.all
+   DataArrayCoarsen.any
+   DataArrayCoarsen.construct
+   DataArrayCoarsen.count
+   DataArrayCoarsen.max
+   DataArrayCoarsen.mean
+   DataArrayCoarsen.median
+   DataArrayCoarsen.min
+   DataArrayCoarsen.prod
+   DataArrayCoarsen.reduce
+   DataArrayCoarsen.std
+   DataArrayCoarsen.sum
+   DataArrayCoarsen.var
diff -pruN 2025.03.1-8/doc/api/coordinates.rst 2025.10.1-1/doc/api/coordinates.rst
--- 2025.03.1-8/doc/api/coordinates.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/api/coordinates.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,86 @@
+.. currentmodule:: xarray
+
+Coordinates
+===========
+
+Creating coordinates
+--------------------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   Coordinates
+   Coordinates.from_xindex
+   Coordinates.from_pandas_multiindex
+
+Attributes
+----------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   Coordinates.dims
+   Coordinates.sizes
+   Coordinates.dtypes
+   Coordinates.variables
+   Coordinates.indexes
+   Coordinates.xindexes
+
+Dictionary Interface
+--------------------
+
+Coordinates implement the mapping interface with keys given by variable names
+and values given by ``DataArray`` objects.
+
+.. autosummary::
+   :toctree: ../generated/
+
+   Coordinates.__getitem__
+   Coordinates.__setitem__
+   Coordinates.__delitem__
+   Coordinates.__or__
+   Coordinates.update
+   Coordinates.get
+   Coordinates.items
+   Coordinates.keys
+   Coordinates.values
+
+Coordinates contents
+--------------------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   Coordinates.to_dataset
+   Coordinates.to_index
+   Coordinates.assign
+   Coordinates.drop_dims
+   Coordinates.drop_vars
+   Coordinates.merge
+   Coordinates.copy
+   Coordinates.rename_vars
+   Coordinates.rename_dims
+
+Comparisons
+-----------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   Coordinates.equals
+   Coordinates.identical
+
+Proxies
+-------
+
+.. currentmodule:: xarray.core.coordinates
+
+Coordinates that are accessed from the ``coords`` property of Dataset, DataArray
+and DataTree objects, respectively.
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DatasetCoordinates
+   DataArrayCoordinates
+   DataTreeCoordinates
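A short illustration of the mapping interface described above, via the `DatasetCoordinates` proxy returned by `Dataset.coords` (toy data):

```python
import xarray as xr

ds = xr.Dataset(
    {"t": ("x", [10.0, 20.0])},
    coords={"x": [1, 2], "label": ("x", ["a", "b"])},
)
coords = ds.coords
print(list(coords.keys()))           # coordinate names, e.g. ['x', 'label']
print(coords["label"])               # DataArray view of the coordinate
coords["label"] = ("x", ["c", "d"])  # __setitem__ updates the parent dataset
del coords["label"]                  # __delitem__ removes the coordinate again
```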
diff -pruN 2025.03.1-8/doc/api/dataarray.rst 2025.10.1-1/doc/api/dataarray.rst
--- 2025.03.1-8/doc/api/dataarray.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/api/dataarray.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,342 @@
+.. currentmodule:: xarray
+
+DataArray
+=========
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DataArray
+
+Attributes
+----------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DataArray.values
+   DataArray.data
+   DataArray.coords
+   DataArray.dims
+   DataArray.sizes
+   DataArray.name
+   DataArray.attrs
+   DataArray.encoding
+   DataArray.indexes
+   DataArray.xindexes
+   DataArray.chunksizes
+
+ndarray attributes
+------------------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DataArray.ndim
+   DataArray.nbytes
+   DataArray.shape
+   DataArray.size
+   DataArray.dtype
+   DataArray.chunks
+
+
+DataArray contents
+------------------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DataArray.assign_coords
+   DataArray.assign_attrs
+   DataArray.pipe
+   DataArray.rename
+   DataArray.swap_dims
+   DataArray.expand_dims
+   DataArray.drop_vars
+   DataArray.drop_indexes
+   DataArray.drop_duplicates
+   DataArray.drop_encoding
+   DataArray.drop_attrs
+   DataArray.reset_coords
+   DataArray.copy
+   DataArray.convert_calendar
+   DataArray.interp_calendar
+   DataArray.get_index
+   DataArray.astype
+   DataArray.item
+
+Indexing
+--------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DataArray.__getitem__
+   DataArray.__setitem__
+   DataArray.loc
+   DataArray.isel
+   DataArray.sel
+   DataArray.drop_sel
+   DataArray.drop_isel
+   DataArray.head
+   DataArray.tail
+   DataArray.thin
+   DataArray.squeeze
+   DataArray.interp
+   DataArray.interp_like
+   DataArray.reindex
+   DataArray.reindex_like
+   DataArray.set_index
+   DataArray.reset_index
+   DataArray.set_xindex
+   DataArray.reorder_levels
+   DataArray.query
+
+Missing value handling
+----------------------
+
+.. autosummary::
+  :toctree: ../generated/
+
+  DataArray.isnull
+  DataArray.notnull
+  DataArray.combine_first
+  DataArray.count
+  DataArray.dropna
+  DataArray.fillna
+  DataArray.ffill
+  DataArray.bfill
+  DataArray.interpolate_na
+  DataArray.where
+  DataArray.isin
+
+Comparisons
+-----------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DataArray.equals
+   DataArray.identical
+   DataArray.broadcast_equals
+
+Computation
+-----------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DataArray.reduce
+   DataArray.groupby
+   DataArray.groupby_bins
+   DataArray.rolling
+   DataArray.rolling_exp
+   DataArray.cumulative
+   DataArray.weighted
+   DataArray.coarsen
+   DataArray.resample
+   DataArray.get_axis_num
+   DataArray.diff
+   DataArray.dot
+   DataArray.quantile
+   DataArray.differentiate
+   DataArray.integrate
+   DataArray.polyfit
+   DataArray.map_blocks
+   DataArray.curvefit
+
+Aggregation
+-----------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DataArray.all
+   DataArray.any
+   DataArray.argmax
+   DataArray.argmin
+   DataArray.count
+   DataArray.idxmax
+   DataArray.idxmin
+   DataArray.max
+   DataArray.min
+   DataArray.mean
+   DataArray.median
+   DataArray.prod
+   DataArray.sum
+   DataArray.std
+   DataArray.var
+   DataArray.cumsum
+   DataArray.cumprod
+
+ndarray methods
+---------------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DataArray.argsort
+   DataArray.clip
+   DataArray.conj
+   DataArray.conjugate
+   DataArray.imag
+   DataArray.searchsorted
+   DataArray.round
+   DataArray.real
+   DataArray.T
+   DataArray.rank
+
+
+String manipulation
+-------------------
+
+.. autosummary::
+   :toctree: ../generated/
+   :template: autosummary/accessor.rst
+
+   DataArray.str
+
+.. autosummary::
+   :toctree: ../generated/
+   :template: autosummary/accessor_method.rst
+
+   DataArray.str.capitalize
+   DataArray.str.casefold
+   DataArray.str.cat
+   DataArray.str.center
+   DataArray.str.contains
+   DataArray.str.count
+   DataArray.str.decode
+   DataArray.str.encode
+   DataArray.str.endswith
+   DataArray.str.extract
+   DataArray.str.extractall
+   DataArray.str.find
+   DataArray.str.findall
+   DataArray.str.format
+   DataArray.str.get
+   DataArray.str.get_dummies
+   DataArray.str.index
+   DataArray.str.isalnum
+   DataArray.str.isalpha
+   DataArray.str.isdecimal
+   DataArray.str.isdigit
+   DataArray.str.islower
+   DataArray.str.isnumeric
+   DataArray.str.isspace
+   DataArray.str.istitle
+   DataArray.str.isupper
+   DataArray.str.join
+   DataArray.str.len
+   DataArray.str.ljust
+   DataArray.str.lower
+   DataArray.str.lstrip
+   DataArray.str.match
+   DataArray.str.normalize
+   DataArray.str.pad
+   DataArray.str.partition
+   DataArray.str.repeat
+   DataArray.str.replace
+   DataArray.str.rfind
+   DataArray.str.rindex
+   DataArray.str.rjust
+   DataArray.str.rpartition
+   DataArray.str.rsplit
+   DataArray.str.rstrip
+   DataArray.str.slice
+   DataArray.str.slice_replace
+   DataArray.str.split
+   DataArray.str.startswith
+   DataArray.str.strip
+   DataArray.str.swapcase
+   DataArray.str.title
+   DataArray.str.translate
+   DataArray.str.upper
+   DataArray.str.wrap
+   DataArray.str.zfill
+
+Datetimelike properties
+-----------------------
+
+**Datetime properties**:
+
+.. autosummary::
+   :toctree: ../generated/
+   :template: autosummary/accessor_attribute.rst
+
+   DataArray.dt.year
+   DataArray.dt.month
+   DataArray.dt.day
+   DataArray.dt.hour
+   DataArray.dt.minute
+   DataArray.dt.second
+   DataArray.dt.microsecond
+   DataArray.dt.nanosecond
+   DataArray.dt.dayofweek
+   DataArray.dt.weekday
+   DataArray.dt.dayofyear
+   DataArray.dt.quarter
+   DataArray.dt.days_in_month
+   DataArray.dt.daysinmonth
+   DataArray.dt.days_in_year
+   DataArray.dt.season
+   DataArray.dt.time
+   DataArray.dt.date
+   DataArray.dt.decimal_year
+   DataArray.dt.calendar
+   DataArray.dt.is_month_start
+   DataArray.dt.is_month_end
+   DataArray.dt.is_quarter_end
+   DataArray.dt.is_year_start
+   DataArray.dt.is_leap_year
+
+**Datetime methods**:
+
+.. autosummary::
+   :toctree: ../generated/
+   :template: autosummary/accessor_method.rst
+
+   DataArray.dt.floor
+   DataArray.dt.ceil
+   DataArray.dt.isocalendar
+   DataArray.dt.round
+   DataArray.dt.strftime
+
+**Timedelta properties**:
+
+.. autosummary::
+   :toctree: ../generated/
+   :template: autosummary/accessor_attribute.rst
+
+   DataArray.dt.days
+   DataArray.dt.seconds
+   DataArray.dt.microseconds
+   DataArray.dt.nanoseconds
+   DataArray.dt.total_seconds
+
+**Timedelta methods**:
+
+.. autosummary::
+   :toctree: ../generated/
+   :template: autosummary/accessor_method.rst
+
+   DataArray.dt.floor
+   DataArray.dt.ceil
+   DataArray.dt.round
+
+
+Reshaping and reorganizing
+--------------------------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DataArray.transpose
+   DataArray.stack
+   DataArray.unstack
+   DataArray.to_unstacked_dataset
+   DataArray.shift
+   DataArray.roll
+   DataArray.pad
+   DataArray.sortby
+   DataArray.broadcast_like
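A quick, illustrative taste of the string and datetime accessors listed above (toy data):

```python
import pandas as pd
import xarray as xr

times = xr.DataArray(pd.date_range("2000-01-01", periods=3), dims="time")
print(times.dt.year.values)       # [2000 2000 2000]
print(times.dt.dayofyear.values)  # [1 2 3]

names = xr.DataArray(["foo", "bar"], dims="x")
print(names.str.upper().values)   # ['FOO' 'BAR']
```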
diff -pruN 2025.03.1-8/doc/api/dataset.rst 2025.10.1-1/doc/api/dataset.rst
--- 2025.03.1-8/doc/api/dataset.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/api/dataset.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,209 @@
+.. currentmodule:: xarray
+
+Dataset
+=======
+
+.. autosummary::
+   :toctree: ../generated/
+
+   Dataset
+
+Attributes
+----------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   Dataset.dims
+   Dataset.sizes
+   Dataset.dtypes
+   Dataset.data_vars
+   Dataset.coords
+   Dataset.attrs
+   Dataset.encoding
+   Dataset.indexes
+   Dataset.xindexes
+   Dataset.chunks
+   Dataset.chunksizes
+   Dataset.nbytes
+
+Dictionary interface
+--------------------
+
+Datasets implement the mapping interface with keys given by variable names
+and values given by ``DataArray`` objects.
+
+.. autosummary::
+   :toctree: ../generated/
+
+   Dataset.__getitem__
+   Dataset.__setitem__
+   Dataset.__delitem__
+   Dataset.update
+   Dataset.get
+   Dataset.items
+   Dataset.keys
+   Dataset.values
+
+Dataset contents
+----------------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   Dataset.copy
+   Dataset.assign
+   Dataset.assign_coords
+   Dataset.assign_attrs
+   Dataset.pipe
+   Dataset.merge
+   Dataset.rename
+   Dataset.rename_vars
+   Dataset.rename_dims
+   Dataset.swap_dims
+   Dataset.expand_dims
+   Dataset.drop_vars
+   Dataset.drop_indexes
+   Dataset.drop_duplicates
+   Dataset.drop_dims
+   Dataset.drop_encoding
+   Dataset.drop_attrs
+   Dataset.set_coords
+   Dataset.reset_coords
+   Dataset.convert_calendar
+   Dataset.interp_calendar
+   Dataset.get_index
+
+Comparisons
+-----------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   Dataset.equals
+   Dataset.identical
+   Dataset.broadcast_equals
+
+Indexing
+--------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   Dataset.loc
+   Dataset.isel
+   Dataset.sel
+   Dataset.drop_sel
+   Dataset.drop_isel
+   Dataset.head
+   Dataset.tail
+   Dataset.thin
+   Dataset.squeeze
+   Dataset.interp
+   Dataset.interp_like
+   Dataset.reindex
+   Dataset.reindex_like
+   Dataset.set_index
+   Dataset.reset_index
+   Dataset.set_xindex
+   Dataset.reorder_levels
+   Dataset.query
+
+Missing value handling
+----------------------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   Dataset.isnull
+   Dataset.notnull
+   Dataset.combine_first
+   Dataset.count
+   Dataset.dropna
+   Dataset.fillna
+   Dataset.ffill
+   Dataset.bfill
+   Dataset.interpolate_na
+   Dataset.where
+   Dataset.isin
+
+Computation
+-----------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   Dataset.map
+   Dataset.reduce
+   Dataset.groupby
+   Dataset.groupby_bins
+   Dataset.rolling
+   Dataset.rolling_exp
+   Dataset.cumulative
+   Dataset.weighted
+   Dataset.coarsen
+   Dataset.resample
+   Dataset.diff
+   Dataset.quantile
+   Dataset.differentiate
+   Dataset.integrate
+   Dataset.map_blocks
+   Dataset.polyfit
+   Dataset.curvefit
+   Dataset.eval
+
+Aggregation
+-----------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   Dataset.all
+   Dataset.any
+   Dataset.argmax
+   Dataset.argmin
+   Dataset.count
+   Dataset.idxmax
+   Dataset.idxmin
+   Dataset.max
+   Dataset.min
+   Dataset.mean
+   Dataset.median
+   Dataset.prod
+   Dataset.sum
+   Dataset.std
+   Dataset.var
+   Dataset.cumsum
+   Dataset.cumprod
+
+ndarray methods
+---------------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   Dataset.argsort
+   Dataset.astype
+   Dataset.clip
+   Dataset.conj
+   Dataset.conjugate
+   Dataset.imag
+   Dataset.round
+   Dataset.real
+   Dataset.rank
+
+Reshaping and reorganizing
+--------------------------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   Dataset.transpose
+   Dataset.stack
+   Dataset.unstack
+   Dataset.to_stacked_array
+   Dataset.shift
+   Dataset.roll
+   Dataset.pad
+   Dataset.sortby
+   Dataset.broadcast_like
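A short illustration of the dictionary interface described above (toy data):

```python
import xarray as xr

ds = xr.Dataset({"temperature": ("x", [271.3, 272.1])}, coords={"x": [0, 1]})
print(list(ds.keys()))              # ['temperature'] -- keys are the data variables
da = ds["temperature"]              # __getitem__ returns a DataArray
ds["pressure"] = ("x", [1.0, 0.9])  # __setitem__ adds a new data variable
del ds["pressure"]                  # __delitem__ removes it again
```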
diff -pruN 2025.03.1-8/doc/api/datatree.rst 2025.10.1-1/doc/api/datatree.rst
--- 2025.03.1-8/doc/api/datatree.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/api/datatree.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,307 @@
+.. currentmodule:: xarray
+
+DataTree
+========
+
+Creating a DataTree
+-------------------
+
+Methods of creating a ``DataTree``.
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DataTree
+   DataTree.from_dict
+
+Tree Attributes
+---------------
+
+Attributes relating to the recursive tree-like structure of a ``DataTree``.
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DataTree.parent
+   DataTree.children
+   DataTree.name
+   DataTree.path
+   DataTree.root
+   DataTree.is_root
+   DataTree.is_leaf
+   DataTree.leaves
+   DataTree.level
+   DataTree.depth
+   DataTree.width
+   DataTree.subtree
+   DataTree.subtree_with_keys
+   DataTree.descendants
+   DataTree.siblings
+   DataTree.lineage
+   DataTree.parents
+   DataTree.ancestors
+   DataTree.groups
+   DataTree.xindexes
+
+Data Contents
+-------------
+
+Interface to the data objects (optionally) stored inside a single ``DataTree`` node.
+This interface echoes that of ``xarray.Dataset``.
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DataTree.dims
+   DataTree.sizes
+   DataTree.data_vars
+   DataTree.ds
+   DataTree.coords
+   DataTree.attrs
+   DataTree.encoding
+   DataTree.indexes
+   DataTree.nbytes
+   DataTree.dataset
+   DataTree.to_dataset
+   DataTree.has_data
+   DataTree.has_attrs
+   DataTree.is_empty
+   DataTree.is_hollow
+   DataTree.chunksizes
+
+Dictionary Interface
+--------------------
+
+``DataTree`` objects also have a dict-like interface mapping keys to either ``xarray.DataArray``\s or to child ``DataTree`` nodes.
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DataTree.__getitem__
+   DataTree.__setitem__
+   DataTree.__delitem__
+   DataTree.update
+   DataTree.get
+   DataTree.items
+   DataTree.keys
+   DataTree.values
+
+Tree Manipulation
+-----------------
+
+For manipulating, traversing, navigating, or mapping over the tree structure.
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DataTree.orphan
+   DataTree.same_tree
+   DataTree.relative_to
+   DataTree.iter_lineage
+   DataTree.find_common_ancestor
+   DataTree.map_over_datasets
+   DataTree.pipe
+   DataTree.match
+   DataTree.filter
+   DataTree.filter_like
+
+Pathlib-like Interface
+----------------------
+
+``DataTree`` objects deliberately echo some of the API of :py:class:`pathlib.PurePath`.
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DataTree.name
+   DataTree.parent
+   DataTree.parents
+   DataTree.relative_to
+
+.. Missing:
+
+.. ..
+
+..    ``DataTree.glob``
+..    ``DataTree.joinpath``
+..    ``DataTree.with_name``
+..    ``DataTree.walk``
+..    ``DataTree.rename``
+..    ``DataTree.replace``
+
+DataTree Contents
+-----------------
+
+Manipulate the contents of all nodes in a ``DataTree`` simultaneously.
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DataTree.copy
+
+   .. DataTree.assign_coords
+   .. DataTree.merge
+   .. DataTree.rename
+   .. DataTree.rename_vars
+   .. DataTree.rename_dims
+   .. DataTree.swap_dims
+   .. DataTree.expand_dims
+   .. DataTree.drop_vars
+   .. DataTree.drop_dims
+   .. DataTree.set_coords
+   .. DataTree.reset_coords
+
+DataTree Node Contents
+----------------------
+
+Manipulate the contents of a single ``DataTree`` node.
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DataTree.assign
+   DataTree.drop_nodes
+
+DataTree Operations
+-------------------
+
+Apply operations over multiple ``DataTree`` objects.
+
+.. autosummary::
+   :toctree: ../generated/
+
+   map_over_datasets
+   group_subtrees
+
+Comparisons
+-----------
+
+Compare one ``DataTree`` object to another.
+
+.. autosummary::
+   :toctree: ../generated/
+
+    DataTree.isomorphic
+    DataTree.equals
+    DataTree.identical
+
+Indexing
+--------
+
+Index into all nodes in the subtree simultaneously.
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DataTree.isel
+   DataTree.sel
+
+..    DataTree.drop_sel
+..    DataTree.drop_isel
+..    DataTree.head
+..    DataTree.tail
+..    DataTree.thin
+..    DataTree.squeeze
+..    DataTree.interp
+..    DataTree.interp_like
+..    DataTree.reindex
+..    DataTree.reindex_like
+..    DataTree.set_index
+..    DataTree.reset_index
+..    DataTree.reorder_levels
+..    DataTree.query
+
+.. ..
+
+..    Missing:
+..    ``DataTree.loc``
+
+
+.. Missing Value Handling
+.. ----------------------
+
+.. .. autosummary::
+..    :toctree: ../generated/
+
+..    DataTree.isnull
+..    DataTree.notnull
+..    DataTree.combine_first
+..    DataTree.dropna
+..    DataTree.fillna
+..    DataTree.ffill
+..    DataTree.bfill
+..    DataTree.interpolate_na
+..    DataTree.where
+..    DataTree.isin
+
+.. Computation
+.. -----------
+
+.. Apply a computation to the data in all nodes in the subtree simultaneously.
+
+.. .. autosummary::
+..    :toctree: ../generated/
+
+..    DataTree.map
+..    DataTree.reduce
+..    DataTree.diff
+..    DataTree.quantile
+..    DataTree.differentiate
+..    DataTree.integrate
+..    DataTree.map_blocks
+..    DataTree.polyfit
+..    DataTree.curvefit
+
+Aggregation
+-----------
+
+Aggregate data in all nodes in the subtree simultaneously.
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DataTree.all
+   DataTree.any
+   DataTree.max
+   DataTree.min
+   DataTree.mean
+   DataTree.median
+   DataTree.prod
+   DataTree.sum
+   DataTree.std
+   DataTree.var
+   DataTree.cumsum
+   DataTree.cumprod
+
+ndarray methods
+---------------
+
+Methods copied from :py:class:`numpy.ndarray` objects, here applying to the data in all nodes in the subtree.
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DataTree.argsort
+   DataTree.conj
+   DataTree.conjugate
+   DataTree.round
+..    DataTree.astype
+..    DataTree.clip
+..    DataTree.rank
+
+.. Reshaping and reorganising
+.. --------------------------
+
+.. Reshape or reorganise the data in all nodes in the subtree.
+
+.. .. autosummary::
+..    :toctree: ../generated/
+
+..    DataTree.transpose
+..    DataTree.stack
+..    DataTree.unstack
+..    DataTree.shift
+..    DataTree.roll
+..    DataTree.pad
+..    DataTree.sortby
+..    DataTree.broadcast_like
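A minimal sketch of building a tree and using the path-like dictionary interface described above (toy data):

```python
import xarray as xr

tree = xr.DataTree.from_dict(
    {
        "/": xr.Dataset({"a": 0}),
        "/child": xr.Dataset({"b": ("x", [1, 2, 3])}),
    }
)
print(tree["child"])        # the child DataTree node
print(tree["child/b"])      # a DataArray reached via a path-like key
print(list(tree.children))  # ['child']
```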
diff -pruN 2025.03.1-8/doc/api/deprecated.rst 2025.10.1-1/doc/api/deprecated.rst
--- 2025.03.1-8/doc/api/deprecated.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/api/deprecated.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,20 @@
+.. currentmodule:: xarray
+
+Deprecated / Pending Deprecation
+================================
+
+.. autosummary::
+   :toctree: ../generated/
+
+   Dataset.drop
+   DataArray.drop
+   Dataset.apply
+   core.groupby.DataArrayGroupBy.apply
+   core.groupby.DatasetGroupBy.apply
+
+.. autosummary::
+   :toctree: ../generated/
+   :template: autosummary/accessor_attribute.rst
+
+   DataArray.dt.weekofyear
+   DataArray.dt.week
diff -pruN 2025.03.1-8/doc/api/encoding.rst 2025.10.1-1/doc/api/encoding.rst
--- 2025.03.1-8/doc/api/encoding.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/api/encoding.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,17 @@
+.. currentmodule:: xarray
+
+Encoding/Decoding
+=================
+
+.. autosummary::
+   :toctree: ../generated/
+
+   decode_cf
+
+Coder objects
+-------------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   coders.CFDatetimeCoder
diff -pruN 2025.03.1-8/doc/api/exceptions.rst 2025.10.1-1/doc/api/exceptions.rst
--- 2025.03.1-8/doc/api/exceptions.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/api/exceptions.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,24 @@
+.. currentmodule:: xarray
+
+Exceptions
+==========
+
+.. autosummary::
+   :toctree: ../generated/
+
+   AlignmentError
+   CoordinateValidationError
+   MergeError
+   SerializationWarning
+
+DataTree
+--------
+
+Exceptions raised when manipulating trees.
+
+.. autosummary::
+   :toctree: ../generated/
+
+   TreeIsomorphismError
+   InvalidTreeError
+   NotFoundInTreeError
diff -pruN 2025.03.1-8/doc/api/groupby.rst 2025.10.1-1/doc/api/groupby.rst
--- 2025.03.1-8/doc/api/groupby.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/api/groupby.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,96 @@
+.. currentmodule:: xarray
+
+GroupBy objects
+===============
+
+.. currentmodule:: xarray.core.groupby
+
+Dataset
+-------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DatasetGroupBy
+   DatasetGroupBy.map
+   DatasetGroupBy.reduce
+   DatasetGroupBy.assign
+   DatasetGroupBy.assign_coords
+   DatasetGroupBy.first
+   DatasetGroupBy.last
+   DatasetGroupBy.fillna
+   DatasetGroupBy.quantile
+   DatasetGroupBy.where
+   DatasetGroupBy.all
+   DatasetGroupBy.any
+   DatasetGroupBy.count
+   DatasetGroupBy.cumsum
+   DatasetGroupBy.cumprod
+   DatasetGroupBy.max
+   DatasetGroupBy.mean
+   DatasetGroupBy.median
+   DatasetGroupBy.min
+   DatasetGroupBy.prod
+   DatasetGroupBy.std
+   DatasetGroupBy.sum
+   DatasetGroupBy.var
+   DatasetGroupBy.dims
+   DatasetGroupBy.groups
+   DatasetGroupBy.shuffle_to_chunks
+
+DataArray
+---------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DataArrayGroupBy
+   DataArrayGroupBy.map
+   DataArrayGroupBy.reduce
+   DataArrayGroupBy.assign_coords
+   DataArrayGroupBy.first
+   DataArrayGroupBy.last
+   DataArrayGroupBy.fillna
+   DataArrayGroupBy.quantile
+   DataArrayGroupBy.where
+   DataArrayGroupBy.all
+   DataArrayGroupBy.any
+   DataArrayGroupBy.count
+   DataArrayGroupBy.cumsum
+   DataArrayGroupBy.cumprod
+   DataArrayGroupBy.max
+   DataArrayGroupBy.mean
+   DataArrayGroupBy.median
+   DataArrayGroupBy.min
+   DataArrayGroupBy.prod
+   DataArrayGroupBy.std
+   DataArrayGroupBy.sum
+   DataArrayGroupBy.var
+   DataArrayGroupBy.dims
+   DataArrayGroupBy.groups
+   DataArrayGroupBy.shuffle_to_chunks
+
+Grouper Objects
+---------------
+
+.. currentmodule:: xarray
+
+.. autosummary::
+   :toctree: ../generated/
+
+   groupers.BinGrouper
+   groupers.SeasonGrouper
+   groupers.UniqueGrouper
+
+
+Resampler Objects
+-----------------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   groupers.SeasonResampler
+   groupers.SeasonResampler.compute_chunks
+
+   groupers.TimeResampler
+   groupers.TimeResampler.compute_chunks
diff -pruN 2025.03.1-8/doc/api/indexes.rst 2025.10.1-1/doc/api/indexes.rst
--- 2025.03.1-8/doc/api/indexes.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/api/indexes.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,87 @@
+.. currentmodule:: xarray
+
+Indexes
+=======
+
+
+.. seealso::
+    See the Xarray gallery on `custom indexes <https://xarray-indexes.readthedocs.io/>`_ for more examples.
+
+
+Creating indexes
+----------------
+.. autosummary::
+   :toctree: ../generated/
+
+   cftime_range
+   date_range
+   date_range_like
+   indexes.RangeIndex.arange
+   indexes.RangeIndex.linspace
+
+
+Built-in Indexes
+----------------
+
+Default, pandas-backed indexes built-in to Xarray:
+
+.. autosummary::
+   :toctree: ../generated/
+
+   indexes.PandasIndex
+   indexes.PandasMultiIndex
+
+
+More complex indexes built-in to Xarray:
+
+.. autosummary::
+   :toctree: ../generated/
+
+   CFTimeIndex
+   indexes.RangeIndex
+   indexes.NDPointIndex
+   indexes.CoordinateTransformIndex
+
+
+Building custom indexes
+-----------------------
+
+These classes are building blocks for more complex Indexes:
+
+.. autosummary::
+   :toctree: ../generated/
+
+   indexes.CoordinateTransform
+   indexes.CoordinateTransformIndex
+   indexes.NDPointIndex
+   indexes.TreeAdapter
+
+The Index base class for building custom indexes:
+
+.. autosummary::
+   :toctree: ../generated/
+
+   Index
+   Index.from_variables
+   Index.concat
+   Index.stack
+   Index.unstack
+   Index.create_variables
+   Index.should_add_coord_to_array
+   Index.to_pandas_index
+   Index.isel
+   Index.sel
+   Index.join
+   Index.reindex_like
+   Index.equals
+   Index.roll
+   Index.rename
+   Index.copy
+
+
+The following are useful when building custom Indexes
+
+.. autosummary::
+   :toctree: ../generated/
+
+   IndexSelResult
diff -pruN 2025.03.1-8/doc/api/io.rst 2025.10.1-1/doc/api/io.rst
--- 2025.03.1-8/doc/api/io.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/api/io.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,87 @@
+.. currentmodule:: xarray
+
+IO / Conversion
+===============
+
+Dataset methods
+---------------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   load_dataset
+   open_dataset
+   open_mfdataset
+   open_zarr
+   save_mfdataset
+   Dataset.as_numpy
+   Dataset.from_dataframe
+   Dataset.from_dict
+   Dataset.to_dataarray
+   Dataset.to_dataframe
+   Dataset.to_dask_dataframe
+   Dataset.to_dict
+   Dataset.to_netcdf
+   Dataset.to_pandas
+   Dataset.to_zarr
+   Dataset.chunk
+   Dataset.close
+   Dataset.compute
+   Dataset.filter_by_attrs
+   Dataset.info
+   Dataset.load
+   Dataset.persist
+   Dataset.unify_chunks
+
+DataArray methods
+-----------------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   load_dataarray
+   open_dataarray
+   DataArray.as_numpy
+   DataArray.from_dict
+   DataArray.from_iris
+   DataArray.from_series
+   DataArray.to_dask_dataframe
+   DataArray.to_dataframe
+   DataArray.to_dataset
+   DataArray.to_dict
+   DataArray.to_index
+   DataArray.to_iris
+   DataArray.to_masked_array
+   DataArray.to_netcdf
+   DataArray.to_numpy
+   DataArray.to_pandas
+   DataArray.to_series
+   DataArray.to_zarr
+   DataArray.chunk
+   DataArray.close
+   DataArray.compute
+   DataArray.persist
+   DataArray.load
+   DataArray.unify_chunks
+
+DataTree methods
+----------------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   load_datatree
+   open_datatree
+   open_groups
+   DataTree.to_dict
+   DataTree.to_netcdf
+   DataTree.to_zarr
+   DataTree.chunk
+   DataTree.load
+   DataTree.compute
+   DataTree.persist
+
+.. ..
+
+..    Missing:
+..    ``open_mfdatatree``
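A minimal round-trip sketch for the IO methods listed above (the file name is arbitrary; writing netCDF assumes an installed backend such as ``netcdf4`` or ``h5netcdf``):

    import numpy as np
    import xarray as xr

    ds = xr.Dataset({"t": ("x", np.arange(4.0))})

    ds.to_netcdf("example.nc")                       # write to disk
    reopened = xr.open_dataset("example.nc").load()  # open lazily, then load into memory
    reopened.close()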
diff -pruN 2025.03.1-8/doc/api/plotting.rst 2025.10.1-1/doc/api/plotting.rst
--- 2025.03.1-8/doc/api/plotting.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/api/plotting.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,59 @@
+.. currentmodule:: xarray
+
+Plotting
+========
+
+Dataset
+-------
+
+.. autosummary::
+   :toctree: ../generated/
+   :template: autosummary/accessor_method.rst
+
+   Dataset.plot.scatter
+   Dataset.plot.quiver
+   Dataset.plot.streamplot
+
+DataArray
+---------
+
+.. autosummary::
+   :toctree: ../generated/
+   :template: autosummary/accessor_callable.rst
+
+   DataArray.plot
+
+.. autosummary::
+   :toctree: ../generated/
+   :template: autosummary/accessor_method.rst
+
+   DataArray.plot.contourf
+   DataArray.plot.contour
+   DataArray.plot.hist
+   DataArray.plot.imshow
+   DataArray.plot.line
+   DataArray.plot.pcolormesh
+   DataArray.plot.step
+   DataArray.plot.scatter
+   DataArray.plot.surface
+
+
+Faceting
+--------
+.. autosummary::
+   :toctree: ../generated/
+
+   plot.FacetGrid
+   plot.FacetGrid.add_colorbar
+   plot.FacetGrid.add_legend
+   plot.FacetGrid.add_quiverkey
+   plot.FacetGrid.map
+   plot.FacetGrid.map_dataarray
+   plot.FacetGrid.map_dataarray_line
+   plot.FacetGrid.map_dataset
+   plot.FacetGrid.map_plot1d
+   plot.FacetGrid.set_axis_labels
+   plot.FacetGrid.set_ticks
+   plot.FacetGrid.set_titles
+   plot.FacetGrid.set_xlabels
+   plot.FacetGrid.set_ylabels
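A small plotting sketch for the accessors listed above (random data; assumes matplotlib is installed):

    import numpy as np
    import xarray as xr
    import matplotlib.pyplot as plt

    da = xr.DataArray(
        np.random.default_rng(0).random((4, 5)), dims=("y", "x"), name="example"
    )

    da.plot()            # 2D data defaults to a pcolormesh-style plot
    da.plot.contourf()   # or call a specific plotting method explicitly
    plt.show()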
diff -pruN 2025.03.1-8/doc/api/resample.rst 2025.10.1-1/doc/api/resample.rst
--- 2025.03.1-8/doc/api/resample.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/api/resample.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,82 @@
+.. currentmodule:: xarray
+
+Resample objects
+================
+
+.. currentmodule:: xarray.core.resample
+
+Dataset
+-------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DatasetResample
+   DatasetResample.asfreq
+   DatasetResample.backfill
+   DatasetResample.interpolate
+   DatasetResample.nearest
+   DatasetResample.pad
+   DatasetResample.all
+   DatasetResample.any
+   DatasetResample.apply
+   DatasetResample.assign
+   DatasetResample.assign_coords
+   DatasetResample.bfill
+   DatasetResample.count
+   DatasetResample.ffill
+   DatasetResample.fillna
+   DatasetResample.first
+   DatasetResample.last
+   DatasetResample.map
+   DatasetResample.max
+   DatasetResample.mean
+   DatasetResample.median
+   DatasetResample.min
+   DatasetResample.prod
+   DatasetResample.quantile
+   DatasetResample.reduce
+   DatasetResample.std
+   DatasetResample.sum
+   DatasetResample.var
+   DatasetResample.where
+   DatasetResample.dims
+   DatasetResample.groups
+
+
+DataArray
+---------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DataArrayResample
+   DataArrayResample.asfreq
+   DataArrayResample.backfill
+   DataArrayResample.interpolate
+   DataArrayResample.nearest
+   DataArrayResample.pad
+   DataArrayResample.all
+   DataArrayResample.any
+   DataArrayResample.apply
+   DataArrayResample.assign_coords
+   DataArrayResample.bfill
+   DataArrayResample.count
+   DataArrayResample.ffill
+   DataArrayResample.fillna
+   DataArrayResample.first
+   DataArrayResample.last
+   DataArrayResample.map
+   DataArrayResample.max
+   DataArrayResample.mean
+   DataArrayResample.median
+   DataArrayResample.min
+   DataArrayResample.prod
+   DataArrayResample.quantile
+   DataArrayResample.reduce
+   DataArrayResample.std
+   DataArrayResample.sum
+   DataArrayResample.var
+   DataArrayResample.where
+   DataArrayResample.dims
+   DataArrayResample.groups
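A brief sketch of how the resample objects above are typically created and reduced (daily toy data; the frequency string follows pandas conventions):

    import numpy as np
    import pandas as pd
    import xarray as xr

    da = xr.DataArray(
        np.arange(365.0),
        dims="time",
        coords={"time": pd.date_range("2000-01-01", periods=365)},
    )

    # .resample() returns a DataArrayResample; any of the reductions listed
    # above (mean, max, map, ffill, ...) can then be applied per period.
    monthly = da.resample(time="1MS").mean()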
diff -pruN 2025.03.1-8/doc/api/rolling-exp.rst 2025.10.1-1/doc/api/rolling-exp.rst
--- 2025.03.1-8/doc/api/rolling-exp.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/api/rolling-exp.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,13 @@
+.. currentmodule:: xarray
+
+Exponential rolling objects
+===========================
+
+.. currentmodule:: xarray.computation.rolling_exp
+
+.. autosummary::
+   :toctree: ../generated/
+
+   RollingExp
+   RollingExp.mean
+   RollingExp.sum
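A one-line usage sketch for ``RollingExp`` (random data; the exponential window operations rely on the optional ``numbagg`` dependency):

    import numpy as np
    import xarray as xr

    da = xr.DataArray(np.random.default_rng(0).standard_normal(100), dims="x")

    smoothed = da.rolling_exp(x=10).mean()   # exponentially weighted rolling mean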
diff -pruN 2025.03.1-8/doc/api/rolling.rst 2025.10.1-1/doc/api/rolling.rst
--- 2025.03.1-8/doc/api/rolling.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/api/rolling.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,49 @@
+.. currentmodule:: xarray
+
+Rolling objects
+===============
+
+.. currentmodule:: xarray.computation.rolling
+
+Dataset
+-------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DatasetRolling
+   DatasetRolling.construct
+   DatasetRolling.reduce
+   DatasetRolling.argmax
+   DatasetRolling.argmin
+   DatasetRolling.count
+   DatasetRolling.max
+   DatasetRolling.mean
+   DatasetRolling.median
+   DatasetRolling.min
+   DatasetRolling.prod
+   DatasetRolling.std
+   DatasetRolling.sum
+   DatasetRolling.var
+
+DataArray
+---------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DataArrayRolling
+   DataArrayRolling.__iter__
+   DataArrayRolling.construct
+   DataArrayRolling.reduce
+   DataArrayRolling.argmax
+   DataArrayRolling.argmin
+   DataArrayRolling.count
+   DataArrayRolling.max
+   DataArrayRolling.mean
+   DataArrayRolling.median
+   DataArrayRolling.min
+   DataArrayRolling.prod
+   DataArrayRolling.std
+   DataArrayRolling.sum
+   DataArrayRolling.var
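A short sketch of the rolling objects listed above (toy data, names invented):

    import numpy as np
    import xarray as xr

    da = xr.DataArray(np.arange(10.0), dims="x")

    da.rolling(x=3, center=True).mean()   # centered 3-point rolling mean (NaN at the edges)
    da.rolling(x=3).construct("window")   # expose each window along a new "window" dimension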
diff -pruN 2025.03.1-8/doc/api/testing.rst 2025.10.1-1/doc/api/testing.rst
--- 2025.03.1-8/doc/api/testing.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/api/testing.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,42 @@
+.. currentmodule:: xarray
+
+Testing
+=======
+
+.. autosummary::
+   :toctree: ../generated/
+
+   testing.assert_equal
+   testing.assert_identical
+   testing.assert_allclose
+   testing.assert_chunks_equal
+
+These functions test whether two ``DataTree`` objects are similar:
+
+.. autosummary::
+   :toctree: ../generated/
+
+   testing.assert_isomorphic
+   testing.assert_equal
+   testing.assert_identical
+
+Hypothesis Testing Strategies
+=============================
+
+.. currentmodule:: xarray
+
+See the :ref:`documentation page on testing <testing.hypothesis>` for a guide on how to use these strategies.
+
+.. warning::
+    These strategies should be considered highly experimental, and liable to change at any time.
+
+.. autosummary::
+   :toctree: ../generated/
+
+   testing.strategies.supported_dtypes
+   testing.strategies.names
+   testing.strategies.dimension_names
+   testing.strategies.dimension_sizes
+   testing.strategies.attrs
+   testing.strategies.variables
+   testing.strategies.unique_subset_of
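A minimal sketch of the assertion helpers listed above (toy arrays; the hypothesis strategies additionally require the ``hypothesis`` package and are not shown here):

    import xarray as xr

    a = xr.DataArray([1.0, 2.0, 3.0], dims="x")
    b = a + 1e-12

    xr.testing.assert_allclose(a, b)          # passes: equal within floating-point tolerance
    xr.testing.assert_identical(a, a.copy())  # also compares names and attributes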
diff -pruN 2025.03.1-8/doc/api/top-level.rst 2025.10.1-1/doc/api/top-level.rst
--- 2025.03.1-8/doc/api/top-level.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/api/top-level.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,57 @@
+.. currentmodule:: xarray
+
+Top-level functions
+===================
+
+Computation
+-----------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   apply_ufunc
+   cov
+   corr
+   cross
+   dot
+   map_blocks
+   polyval
+   unify_chunks
+   where
+
+Combining Data
+--------------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   align
+   broadcast
+   concat
+   merge
+   combine_by_coords
+   combine_nested
+
+Creation
+--------
+.. autosummary::
+   :toctree: ../generated/
+
+   DataArray
+   Dataset
+   DataTree
+   full_like
+   zeros_like
+   ones_like
+
+Miscellaneous
+-------------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   decode_cf
+   infer_freq
+   show_versions
+   set_options
+   get_options
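A few of the top-level functions above in a minimal sketch (arrays are invented for illustration):

    import xarray as xr

    a = xr.DataArray([1, 2, 3], dims="x")
    b = xr.DataArray([10, 20, 30], dims="x")

    xr.where(a > 1, a, b)          # element-wise choice between two objects
    xr.concat([a, b], dim="y")     # stack the two arrays along a new "y" dimension
    xr.full_like(a, fill_value=0)  # same shape and coords as `a`, filled with 0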
diff -pruN 2025.03.1-8/doc/api/tutorial.rst 2025.10.1-1/doc/api/tutorial.rst
--- 2025.03.1-8/doc/api/tutorial.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/api/tutorial.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,12 @@
+.. currentmodule:: xarray
+
+Tutorial
+========
+
+.. autosummary::
+   :toctree: ../generated/
+
+   tutorial.open_dataset
+   tutorial.load_dataset
+   tutorial.open_datatree
+   tutorial.load_datatree
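A usage sketch for the tutorial helpers (downloads a small sample dataset on first use, so it assumes the ``pooch`` package and a network connection):

    import xarray as xr

    ds = xr.tutorial.open_dataset("air_temperature")
    ds["air"].isel(time=0)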
diff -pruN 2025.03.1-8/doc/api/ufuncs.rst 2025.10.1-1/doc/api/ufuncs.rst
--- 2025.03.1-8/doc/api/ufuncs.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/api/ufuncs.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,115 @@
+.. currentmodule:: xarray
+
+Universal functions
+===================
+
+These functions are equivalent to their NumPy versions, but for xarray
+objects backed by non-NumPy array types (e.g. ``cupy``, ``sparse``, or ``jax``),
+they will ensure that the computation is dispatched to the appropriate
+backend. You can find them in the ``xarray.ufuncs`` module:
+
+.. autosummary::
+   :toctree: ../generated/
+
+   ufuncs.abs
+   ufuncs.absolute
+   ufuncs.acos
+   ufuncs.acosh
+   ufuncs.arccos
+   ufuncs.arccosh
+   ufuncs.arcsin
+   ufuncs.arcsinh
+   ufuncs.arctan
+   ufuncs.arctanh
+   ufuncs.asin
+   ufuncs.asinh
+   ufuncs.atan
+   ufuncs.atanh
+   ufuncs.bitwise_count
+   ufuncs.bitwise_invert
+   ufuncs.bitwise_not
+   ufuncs.cbrt
+   ufuncs.ceil
+   ufuncs.conj
+   ufuncs.conjugate
+   ufuncs.cos
+   ufuncs.cosh
+   ufuncs.deg2rad
+   ufuncs.degrees
+   ufuncs.exp
+   ufuncs.exp2
+   ufuncs.expm1
+   ufuncs.fabs
+   ufuncs.floor
+   ufuncs.invert
+   ufuncs.isfinite
+   ufuncs.isinf
+   ufuncs.isnan
+   ufuncs.isnat
+   ufuncs.log
+   ufuncs.log10
+   ufuncs.log1p
+   ufuncs.log2
+   ufuncs.logical_not
+   ufuncs.negative
+   ufuncs.positive
+   ufuncs.rad2deg
+   ufuncs.radians
+   ufuncs.reciprocal
+   ufuncs.rint
+   ufuncs.sign
+   ufuncs.signbit
+   ufuncs.sin
+   ufuncs.sinh
+   ufuncs.spacing
+   ufuncs.sqrt
+   ufuncs.square
+   ufuncs.tan
+   ufuncs.tanh
+   ufuncs.trunc
+   ufuncs.add
+   ufuncs.arctan2
+   ufuncs.atan2
+   ufuncs.bitwise_and
+   ufuncs.bitwise_left_shift
+   ufuncs.bitwise_or
+   ufuncs.bitwise_right_shift
+   ufuncs.bitwise_xor
+   ufuncs.copysign
+   ufuncs.divide
+   ufuncs.equal
+   ufuncs.float_power
+   ufuncs.floor_divide
+   ufuncs.fmax
+   ufuncs.fmin
+   ufuncs.fmod
+   ufuncs.gcd
+   ufuncs.greater
+   ufuncs.greater_equal
+   ufuncs.heaviside
+   ufuncs.hypot
+   ufuncs.lcm
+   ufuncs.ldexp
+   ufuncs.left_shift
+   ufuncs.less
+   ufuncs.less_equal
+   ufuncs.logaddexp
+   ufuncs.logaddexp2
+   ufuncs.logical_and
+   ufuncs.logical_or
+   ufuncs.logical_xor
+   ufuncs.maximum
+   ufuncs.minimum
+   ufuncs.mod
+   ufuncs.multiply
+   ufuncs.nextafter
+   ufuncs.not_equal
+   ufuncs.pow
+   ufuncs.power
+   ufuncs.remainder
+   ufuncs.right_shift
+   ufuncs.subtract
+   ufuncs.true_divide
+   ufuncs.angle
+   ufuncs.isreal
+   ufuncs.iscomplex
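A minimal sketch of the dispatch behaviour described above (NumPy-backed data here; with ``cupy``-, ``sparse``- or ``jax``-backed data the same call would be routed to that library instead):

    import numpy as np
    import xarray as xr

    da = xr.DataArray(np.linspace(0.0, np.pi, 5), dims="x")

    xr.ufuncs.sin(da)   # equivalent to np.sin(da) for NumPy data, but backend-aware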
diff -pruN 2025.03.1-8/doc/api/weighted.rst 2025.10.1-1/doc/api/weighted.rst
--- 2025.03.1-8/doc/api/weighted.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/api/weighted.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,36 @@
+.. currentmodule:: xarray
+
+Weighted objects
+================
+
+.. currentmodule:: xarray.computation.weighted
+
+Dataset
+-------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DatasetWeighted
+   DatasetWeighted.mean
+   DatasetWeighted.quantile
+   DatasetWeighted.sum
+   DatasetWeighted.std
+   DatasetWeighted.var
+   DatasetWeighted.sum_of_weights
+   DatasetWeighted.sum_of_squares
+
+DataArray
+---------
+
+.. autosummary::
+   :toctree: ../generated/
+
+   DataArrayWeighted
+   DataArrayWeighted.mean
+   DataArrayWeighted.quantile
+   DataArrayWeighted.sum
+   DataArrayWeighted.std
+   DataArrayWeighted.var
+   DataArrayWeighted.sum_of_weights
+   DataArrayWeighted.sum_of_squares
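A short sketch of the weighted objects listed above (values and weights are invented for illustration):

    import xarray as xr

    da = xr.DataArray([1.0, 2.0, 3.0], dims="x")
    weights = xr.DataArray([0.5, 0.25, 0.25], dims="x")

    da.weighted(weights).mean("x")          # weighted average along "x"
    da.weighted(weights).sum_of_weights()   # total weight that contributed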
diff -pruN 2025.03.1-8/doc/api-hidden.rst 2025.10.1-1/doc/api-hidden.rst
--- 2025.03.1-8/doc/api-hidden.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/api-hidden.rst	2025-10-10 10:38:05.000000000 +0000
@@ -228,6 +228,7 @@
    Variable.isnull
    Variable.item
    Variable.load
+   Variable.load_async
    Variable.max
    Variable.mean
    Variable.median
@@ -515,20 +516,9 @@
    CFTimeIndex.values
    CFTimeIndex.year
 
-   Index.from_variables
-   Index.concat
-   Index.stack
-   Index.unstack
-   Index.create_variables
-   Index.to_pandas_index
-   Index.isel
-   Index.sel
-   Index.join
-   Index.reindex_like
-   Index.equals
-   Index.roll
-   Index.rename
-   Index.copy
+   indexes.RangeIndex.start
+   indexes.RangeIndex.stop
+   indexes.RangeIndex.step
 
    backends.NetCDF4DataStore.close
    backends.NetCDF4DataStore.encode
diff -pruN 2025.03.1-8/doc/api.rst 2025.10.1-1/doc/api.rst
--- 2025.03.1-8/doc/api.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/api.rst	2025-10-10 10:38:05.000000000 +0000
@@ -12,1729 +12,29 @@ documentation.
 
 See also: :ref:`public-api` and :ref:`api-stability`.
 
-Top-level functions
-===================
+.. toctree::
+   :maxdepth: 1
 
-.. autosummary::
-   :toctree: generated/
-
-   apply_ufunc
-   align
-   broadcast
-   concat
-   merge
-   combine_by_coords
-   combine_nested
-   where
-   infer_freq
-   full_like
-   zeros_like
-   ones_like
-   cov
-   corr
-   cross
-   dot
-   polyval
-   map_blocks
-   show_versions
-   set_options
-   get_options
-   unify_chunks
-
-Dataset
-=======
-
-Creating a dataset
-------------------
-
-.. autosummary::
-   :toctree: generated/
-
-   Dataset
-   decode_cf
-
-Attributes
-----------
-
-.. autosummary::
-   :toctree: generated/
-
-   Dataset.dims
-   Dataset.sizes
-   Dataset.dtypes
-   Dataset.data_vars
-   Dataset.coords
-   Dataset.attrs
-   Dataset.encoding
-   Dataset.indexes
-   Dataset.xindexes
-   Dataset.chunks
-   Dataset.chunksizes
-   Dataset.nbytes
-
-Dictionary interface
---------------------
-
-Datasets implement the mapping interface with keys given by variable names
-and values given by ``DataArray`` objects.
-
-.. autosummary::
-   :toctree: generated/
-
-   Dataset.__getitem__
-   Dataset.__setitem__
-   Dataset.__delitem__
-   Dataset.update
-   Dataset.get
-   Dataset.items
-   Dataset.keys
-   Dataset.values
-
-Dataset contents
-----------------
-
-.. autosummary::
-   :toctree: generated/
-
-   Dataset.copy
-   Dataset.assign
-   Dataset.assign_coords
-   Dataset.assign_attrs
-   Dataset.pipe
-   Dataset.merge
-   Dataset.rename
-   Dataset.rename_vars
-   Dataset.rename_dims
-   Dataset.swap_dims
-   Dataset.expand_dims
-   Dataset.drop_vars
-   Dataset.drop_indexes
-   Dataset.drop_duplicates
-   Dataset.drop_dims
-   Dataset.drop_encoding
-   Dataset.drop_attrs
-   Dataset.set_coords
-   Dataset.reset_coords
-   Dataset.convert_calendar
-   Dataset.interp_calendar
-   Dataset.get_index
-
-Comparisons
------------
-
-.. autosummary::
-   :toctree: generated/
-
-   Dataset.equals
-   Dataset.identical
-   Dataset.broadcast_equals
-
-Indexing
---------
-
-.. autosummary::
-   :toctree: generated/
-
-   Dataset.loc
-   Dataset.isel
-   Dataset.sel
-   Dataset.drop_sel
-   Dataset.drop_isel
-   Dataset.head
-   Dataset.tail
-   Dataset.thin
-   Dataset.squeeze
-   Dataset.interp
-   Dataset.interp_like
-   Dataset.reindex
-   Dataset.reindex_like
-   Dataset.set_index
-   Dataset.reset_index
-   Dataset.set_xindex
-   Dataset.reorder_levels
-   Dataset.query
-
-Missing value handling
-----------------------
-
-.. autosummary::
-   :toctree: generated/
-
-   Dataset.isnull
-   Dataset.notnull
-   Dataset.combine_first
-   Dataset.count
-   Dataset.dropna
-   Dataset.fillna
-   Dataset.ffill
-   Dataset.bfill
-   Dataset.interpolate_na
-   Dataset.where
-   Dataset.isin
-
-Computation
------------
-
-.. autosummary::
-   :toctree: generated/
-
-   Dataset.map
-   Dataset.reduce
-   Dataset.groupby
-   Dataset.groupby_bins
-   Dataset.rolling
-   Dataset.rolling_exp
-   Dataset.cumulative
-   Dataset.weighted
-   Dataset.coarsen
-   Dataset.resample
-   Dataset.diff
-   Dataset.quantile
-   Dataset.differentiate
-   Dataset.integrate
-   Dataset.map_blocks
-   Dataset.polyfit
-   Dataset.curvefit
-   Dataset.eval
-
-Aggregation
------------
-
-.. autosummary::
-   :toctree: generated/
-
-   Dataset.all
-   Dataset.any
-   Dataset.argmax
-   Dataset.argmin
-   Dataset.count
-   Dataset.idxmax
-   Dataset.idxmin
-   Dataset.max
-   Dataset.min
-   Dataset.mean
-   Dataset.median
-   Dataset.prod
-   Dataset.sum
-   Dataset.std
-   Dataset.var
-   Dataset.cumsum
-   Dataset.cumprod
-
-ndarray methods
----------------
-
-.. autosummary::
-   :toctree: generated/
-
-   Dataset.argsort
-   Dataset.astype
-   Dataset.clip
-   Dataset.conj
-   Dataset.conjugate
-   Dataset.imag
-   Dataset.round
-   Dataset.real
-   Dataset.rank
-
-Reshaping and reorganizing
---------------------------
-
-.. autosummary::
-   :toctree: generated/
-
-   Dataset.transpose
-   Dataset.stack
-   Dataset.unstack
-   Dataset.to_stacked_array
-   Dataset.shift
-   Dataset.roll
-   Dataset.pad
-   Dataset.sortby
-   Dataset.broadcast_like
-
-DataArray
-=========
-
-.. autosummary::
-   :toctree: generated/
-
-   DataArray
-
-Attributes
-----------
-
-.. autosummary::
-   :toctree: generated/
-
-   DataArray.values
-   DataArray.data
-   DataArray.coords
-   DataArray.dims
-   DataArray.sizes
-   DataArray.name
-   DataArray.attrs
-   DataArray.encoding
-   DataArray.indexes
-   DataArray.xindexes
-   DataArray.chunksizes
-
-ndarray attributes
-------------------
-
-.. autosummary::
-   :toctree: generated/
-
-   DataArray.ndim
-   DataArray.nbytes
-   DataArray.shape
-   DataArray.size
-   DataArray.dtype
-   DataArray.chunks
-
-
-DataArray contents
-------------------
-
-.. autosummary::
-   :toctree: generated/
-
-   DataArray.assign_coords
-   DataArray.assign_attrs
-   DataArray.pipe
-   DataArray.rename
-   DataArray.swap_dims
-   DataArray.expand_dims
-   DataArray.drop_vars
-   DataArray.drop_indexes
-   DataArray.drop_duplicates
-   DataArray.drop_encoding
-   DataArray.drop_attrs
-   DataArray.reset_coords
-   DataArray.copy
-   DataArray.convert_calendar
-   DataArray.interp_calendar
-   DataArray.get_index
-   DataArray.astype
-   DataArray.item
-
-Indexing
---------
-
-.. autosummary::
-   :toctree: generated/
-
-   DataArray.__getitem__
-   DataArray.__setitem__
-   DataArray.loc
-   DataArray.isel
-   DataArray.sel
-   DataArray.drop_sel
-   DataArray.drop_isel
-   DataArray.head
-   DataArray.tail
-   DataArray.thin
-   DataArray.squeeze
-   DataArray.interp
-   DataArray.interp_like
-   DataArray.reindex
-   DataArray.reindex_like
-   DataArray.set_index
-   DataArray.reset_index
-   DataArray.set_xindex
-   DataArray.reorder_levels
-   DataArray.query
-
-Missing value handling
-----------------------
-
-.. autosummary::
-  :toctree: generated/
-
-  DataArray.isnull
-  DataArray.notnull
-  DataArray.combine_first
-  DataArray.count
-  DataArray.dropna
-  DataArray.fillna
-  DataArray.ffill
-  DataArray.bfill
-  DataArray.interpolate_na
-  DataArray.where
-  DataArray.isin
-
-Comparisons
------------
-
-.. autosummary::
-   :toctree: generated/
-
-   DataArray.equals
-   DataArray.identical
-   DataArray.broadcast_equals
-
-Computation
------------
-
-.. autosummary::
-   :toctree: generated/
-
-   DataArray.reduce
-   DataArray.groupby
-   DataArray.groupby_bins
-   DataArray.rolling
-   DataArray.rolling_exp
-   DataArray.cumulative
-   DataArray.weighted
-   DataArray.coarsen
-   DataArray.resample
-   DataArray.get_axis_num
-   DataArray.diff
-   DataArray.dot
-   DataArray.quantile
-   DataArray.differentiate
-   DataArray.integrate
-   DataArray.polyfit
-   DataArray.map_blocks
-   DataArray.curvefit
-
-Aggregation
------------
-
-.. autosummary::
-   :toctree: generated/
-
-   DataArray.all
-   DataArray.any
-   DataArray.argmax
-   DataArray.argmin
-   DataArray.count
-   DataArray.idxmax
-   DataArray.idxmin
-   DataArray.max
-   DataArray.min
-   DataArray.mean
-   DataArray.median
-   DataArray.prod
-   DataArray.sum
-   DataArray.std
-   DataArray.var
-   DataArray.cumsum
-   DataArray.cumprod
-
-ndarray methods
----------------
-
-.. autosummary::
-   :toctree: generated/
-
-   DataArray.argsort
-   DataArray.clip
-   DataArray.conj
-   DataArray.conjugate
-   DataArray.imag
-   DataArray.searchsorted
-   DataArray.round
-   DataArray.real
-   DataArray.T
-   DataArray.rank
-
-
-String manipulation
--------------------
-
-.. autosummary::
-   :toctree: generated/
-   :template: autosummary/accessor.rst
-
-   DataArray.str
-
-.. autosummary::
-   :toctree: generated/
-   :template: autosummary/accessor_method.rst
-
-   DataArray.str.capitalize
-   DataArray.str.casefold
-   DataArray.str.cat
-   DataArray.str.center
-   DataArray.str.contains
-   DataArray.str.count
-   DataArray.str.decode
-   DataArray.str.encode
-   DataArray.str.endswith
-   DataArray.str.extract
-   DataArray.str.extractall
-   DataArray.str.find
-   DataArray.str.findall
-   DataArray.str.format
-   DataArray.str.get
-   DataArray.str.get_dummies
-   DataArray.str.index
-   DataArray.str.isalnum
-   DataArray.str.isalpha
-   DataArray.str.isdecimal
-   DataArray.str.isdigit
-   DataArray.str.islower
-   DataArray.str.isnumeric
-   DataArray.str.isspace
-   DataArray.str.istitle
-   DataArray.str.isupper
-   DataArray.str.join
-   DataArray.str.len
-   DataArray.str.ljust
-   DataArray.str.lower
-   DataArray.str.lstrip
-   DataArray.str.match
-   DataArray.str.normalize
-   DataArray.str.pad
-   DataArray.str.partition
-   DataArray.str.repeat
-   DataArray.str.replace
-   DataArray.str.rfind
-   DataArray.str.rindex
-   DataArray.str.rjust
-   DataArray.str.rpartition
-   DataArray.str.rsplit
-   DataArray.str.rstrip
-   DataArray.str.slice
-   DataArray.str.slice_replace
-   DataArray.str.split
-   DataArray.str.startswith
-   DataArray.str.strip
-   DataArray.str.swapcase
-   DataArray.str.title
-   DataArray.str.translate
-   DataArray.str.upper
-   DataArray.str.wrap
-   DataArray.str.zfill
-
-Datetimelike properties
------------------------
-
-**Datetime properties**:
-
-.. autosummary::
-   :toctree: generated/
-   :template: autosummary/accessor_attribute.rst
-
-   DataArray.dt.year
-   DataArray.dt.month
-   DataArray.dt.day
-   DataArray.dt.hour
-   DataArray.dt.minute
-   DataArray.dt.second
-   DataArray.dt.microsecond
-   DataArray.dt.nanosecond
-   DataArray.dt.dayofweek
-   DataArray.dt.weekday
-   DataArray.dt.dayofyear
-   DataArray.dt.quarter
-   DataArray.dt.days_in_month
-   DataArray.dt.daysinmonth
-   DataArray.dt.days_in_year
-   DataArray.dt.season
-   DataArray.dt.time
-   DataArray.dt.date
-   DataArray.dt.decimal_year
-   DataArray.dt.calendar
-   DataArray.dt.is_month_start
-   DataArray.dt.is_month_end
-   DataArray.dt.is_quarter_end
-   DataArray.dt.is_year_start
-   DataArray.dt.is_leap_year
-
-**Datetime methods**:
-
-.. autosummary::
-   :toctree: generated/
-   :template: autosummary/accessor_method.rst
-
-   DataArray.dt.floor
-   DataArray.dt.ceil
-   DataArray.dt.isocalendar
-   DataArray.dt.round
-   DataArray.dt.strftime
-
-**Timedelta properties**:
-
-.. autosummary::
-   :toctree: generated/
-   :template: autosummary/accessor_attribute.rst
-
-   DataArray.dt.days
-   DataArray.dt.seconds
-   DataArray.dt.microseconds
-   DataArray.dt.nanoseconds
-   DataArray.dt.total_seconds
-
-**Timedelta methods**:
-
-.. autosummary::
-   :toctree: generated/
-   :template: autosummary/accessor_method.rst
-
-   DataArray.dt.floor
-   DataArray.dt.ceil
-   DataArray.dt.round
-
-
-Reshaping and reorganizing
---------------------------
-
-.. autosummary::
-   :toctree: generated/
-
-   DataArray.transpose
-   DataArray.stack
-   DataArray.unstack
-   DataArray.to_unstacked_dataset
-   DataArray.shift
-   DataArray.roll
-   DataArray.pad
-   DataArray.sortby
-   DataArray.broadcast_like
-
-DataTree
-========
-
-Creating a DataTree
--------------------
-
-Methods of creating a ``DataTree``.
-
-.. autosummary::
-   :toctree: generated/
-
-   DataTree
-   DataTree.from_dict
-
-Tree Attributes
----------------
-
-Attributes relating to the recursive tree-like structure of a ``DataTree``.
-
-.. autosummary::
-   :toctree: generated/
-
-   DataTree.parent
-   DataTree.children
-   DataTree.name
-   DataTree.path
-   DataTree.root
-   DataTree.is_root
-   DataTree.is_leaf
-   DataTree.leaves
-   DataTree.level
-   DataTree.depth
-   DataTree.width
-   DataTree.subtree
-   DataTree.subtree_with_keys
-   DataTree.descendants
-   DataTree.siblings
-   DataTree.lineage
-   DataTree.parents
-   DataTree.ancestors
-   DataTree.groups
-   DataTree.xindexes
-
-Data Contents
--------------
-
-Interface to the data objects (optionally) stored inside a single ``DataTree`` node.
-This interface echoes that of ``xarray.Dataset``.
-
-.. autosummary::
-   :toctree: generated/
-
-   DataTree.dims
-   DataTree.sizes
-   DataTree.data_vars
-   DataTree.ds
-   DataTree.coords
-   DataTree.attrs
-   DataTree.encoding
-   DataTree.indexes
-   DataTree.nbytes
-   DataTree.dataset
-   DataTree.to_dataset
-   DataTree.has_data
-   DataTree.has_attrs
-   DataTree.is_empty
-   DataTree.is_hollow
-   DataTree.chunksizes
-
-Dictionary Interface
---------------------
-
-``DataTree`` objects also have a dict-like interface mapping keys to either ``xarray.DataArray``\s or to child ``DataTree`` nodes.
-
-.. autosummary::
-   :toctree: generated/
-
-   DataTree.__getitem__
-   DataTree.__setitem__
-   DataTree.__delitem__
-   DataTree.update
-   DataTree.get
-   DataTree.items
-   DataTree.keys
-   DataTree.values
-
-Tree Manipulation
------------------
-
-For manipulating, traversing, navigating, or mapping over the tree structure.
-
-.. autosummary::
-   :toctree: generated/
-
-   DataTree.orphan
-   DataTree.same_tree
-   DataTree.relative_to
-   DataTree.iter_lineage
-   DataTree.find_common_ancestor
-   DataTree.map_over_datasets
-   DataTree.pipe
-   DataTree.match
-   DataTree.filter
-   DataTree.filter_like
-
-Pathlib-like Interface
-----------------------
-
-``DataTree`` objects deliberately echo some of the API of :py:class:`pathlib.PurePath`.
-
-.. autosummary::
-   :toctree: generated/
-
-   DataTree.name
-   DataTree.parent
-   DataTree.parents
-   DataTree.relative_to
-
-.. Missing:
-
-.. ..
-
-..    ``DataTree.glob``
-..    ``DataTree.joinpath``
-..    ``DataTree.with_name``
-..    ``DataTree.walk``
-..    ``DataTree.rename``
-..    ``DataTree.replace``
-
-DataTree Contents
------------------
-
-Manipulate the contents of all nodes in a ``DataTree`` simultaneously.
-
-.. autosummary::
-   :toctree: generated/
-
-   DataTree.copy
-
-   .. DataTree.assign_coords
-   .. DataTree.merge
-   .. DataTree.rename
-   .. DataTree.rename_vars
-   .. DataTree.rename_dims
-   .. DataTree.swap_dims
-   .. DataTree.expand_dims
-   .. DataTree.drop_vars
-   .. DataTree.drop_dims
-   .. DataTree.set_coords
-   .. DataTree.reset_coords
-
-DataTree Node Contents
-----------------------
-
-Manipulate the contents of a single ``DataTree`` node.
-
-.. autosummary::
-   :toctree: generated/
-
-   DataTree.assign
-   DataTree.drop_nodes
-
-DataTree Operations
--------------------
-
-Apply operations over multiple ``DataTree`` objects.
-
-.. autosummary::
-   :toctree: generated/
-
-   map_over_datasets
-   group_subtrees
-
-Comparisons
------------
-
-Compare one ``DataTree`` object to another.
-
-.. autosummary::
-   :toctree: generated/
-
-    DataTree.isomorphic
-    DataTree.equals
-    DataTree.identical
-
-Indexing
---------
-
-Index into all nodes in the subtree simultaneously.
-
-.. autosummary::
-   :toctree: generated/
-
-   DataTree.isel
-   DataTree.sel
-
-..    DataTree.drop_sel
-..    DataTree.drop_isel
-..    DataTree.head
-..    DataTree.tail
-..    DataTree.thin
-..    DataTree.squeeze
-..    DataTree.interp
-..    DataTree.interp_like
-..    DataTree.reindex
-..    DataTree.reindex_like
-..    DataTree.set_index
-..    DataTree.reset_index
-..    DataTree.reorder_levels
-..    DataTree.query
-
-.. ..
-
-..    Missing:
-..    ``DataTree.loc``
-
-
-.. Missing Value Handling
-.. ----------------------
-
-.. .. autosummary::
-..    :toctree: generated/
-
-..    DataTree.isnull
-..    DataTree.notnull
-..    DataTree.combine_first
-..    DataTree.dropna
-..    DataTree.fillna
-..    DataTree.ffill
-..    DataTree.bfill
-..    DataTree.interpolate_na
-..    DataTree.where
-..    DataTree.isin
-
-.. Computation
-.. -----------
-
-.. Apply a computation to the data in all nodes in the subtree simultaneously.
-
-.. .. autosummary::
-..    :toctree: generated/
-
-..    DataTree.map
-..    DataTree.reduce
-..    DataTree.diff
-..    DataTree.quantile
-..    DataTree.differentiate
-..    DataTree.integrate
-..    DataTree.map_blocks
-..    DataTree.polyfit
-..    DataTree.curvefit
-
-Aggregation
------------
-
-Aggregate data in all nodes in the subtree simultaneously.
-
-.. autosummary::
-   :toctree: generated/
-
-   DataTree.all
-   DataTree.any
-   DataTree.max
-   DataTree.min
-   DataTree.mean
-   DataTree.median
-   DataTree.prod
-   DataTree.sum
-   DataTree.std
-   DataTree.var
-   DataTree.cumsum
-   DataTree.cumprod
-
-ndarray methods
----------------
-
-Methods copied from :py:class:`numpy.ndarray` objects, here applying to the data in all nodes in the subtree.
-
-.. autosummary::
-   :toctree: generated/
-
-   DataTree.argsort
-   DataTree.conj
-   DataTree.conjugate
-   DataTree.round
-..    DataTree.astype
-..    DataTree.clip
-..    DataTree.rank
-
-.. Reshaping and reorganising
-.. --------------------------
-
-.. Reshape or reorganise the data in all nodes in the subtree.
-
-.. .. autosummary::
-..    :toctree: generated/
-
-..    DataTree.transpose
-..    DataTree.stack
-..    DataTree.unstack
-..    DataTree.shift
-..    DataTree.roll
-..    DataTree.pad
-..    DataTree.sortby
-..    DataTree.broadcast_like
-
-Coordinates
-===========
-
-Creating coordinates
---------------------
-
-.. autosummary::
-   :toctree: generated/
-
-   Coordinates
-   Coordinates.from_xindex
-   Coordinates.from_pandas_multiindex
-
-Attributes
-----------
-
-.. autosummary::
-   :toctree: generated/
-
-   Coordinates.dims
-   Coordinates.sizes
-   Coordinates.dtypes
-   Coordinates.variables
-   Coordinates.indexes
-   Coordinates.xindexes
-
-Dictionary Interface
---------------------
-
-Coordinates implement the mapping interface with keys given by variable names
-and values given by ``DataArray`` objects.
-
-.. autosummary::
-   :toctree: generated/
-
-   Coordinates.__getitem__
-   Coordinates.__setitem__
-   Coordinates.__delitem__
-   Coordinates.update
-   Coordinates.get
-   Coordinates.items
-   Coordinates.keys
-   Coordinates.values
-
-Coordinates contents
---------------------
-
-.. autosummary::
-   :toctree: generated/
-
-   Coordinates.to_dataset
-   Coordinates.to_index
-   Coordinates.assign
-   Coordinates.merge
-   Coordinates.copy
-
-Comparisons
------------
-
-.. autosummary::
-   :toctree: generated/
-
-   Coordinates.equals
-   Coordinates.identical
-
-Proxies
--------
-
-Coordinates that are accessed from the ``coords`` property of Dataset, DataArray
-and DataTree objects, respectively.
-
-.. autosummary::
-   :toctree: generated/
-
-   core.coordinates.DatasetCoordinates
-   core.coordinates.DataArrayCoordinates
-   core.coordinates.DataTreeCoordinates
-
-Universal functions
-===================
-
-These functions are equivalent to their NumPy versions, but for xarray
-objects backed by non-NumPy array types (e.g. ``cupy``, ``sparse``, or ``jax``),
-they will ensure that the computation is dispatched to the appropriate
-backend. You can find them in the ``xarray.ufuncs`` module:
-
-.. autosummary::
-   :toctree: generated/
-
-   ufuncs.abs
-   ufuncs.absolute
-   ufuncs.acos
-   ufuncs.acosh
-   ufuncs.arccos
-   ufuncs.arccosh
-   ufuncs.arcsin
-   ufuncs.arcsinh
-   ufuncs.arctan
-   ufuncs.arctanh
-   ufuncs.asin
-   ufuncs.asinh
-   ufuncs.atan
-   ufuncs.atanh
-   ufuncs.bitwise_count
-   ufuncs.bitwise_invert
-   ufuncs.bitwise_not
-   ufuncs.cbrt
-   ufuncs.ceil
-   ufuncs.conj
-   ufuncs.conjugate
-   ufuncs.cos
-   ufuncs.cosh
-   ufuncs.deg2rad
-   ufuncs.degrees
-   ufuncs.exp
-   ufuncs.exp2
-   ufuncs.expm1
-   ufuncs.fabs
-   ufuncs.floor
-   ufuncs.invert
-   ufuncs.isfinite
-   ufuncs.isinf
-   ufuncs.isnan
-   ufuncs.isnat
-   ufuncs.log
-   ufuncs.log10
-   ufuncs.log1p
-   ufuncs.log2
-   ufuncs.logical_not
-   ufuncs.negative
-   ufuncs.positive
-   ufuncs.rad2deg
-   ufuncs.radians
-   ufuncs.reciprocal
-   ufuncs.rint
-   ufuncs.sign
-   ufuncs.signbit
-   ufuncs.sin
-   ufuncs.sinh
-   ufuncs.spacing
-   ufuncs.sqrt
-   ufuncs.square
-   ufuncs.tan
-   ufuncs.tanh
-   ufuncs.trunc
-   ufuncs.add
-   ufuncs.arctan2
-   ufuncs.atan2
-   ufuncs.bitwise_and
-   ufuncs.bitwise_left_shift
-   ufuncs.bitwise_or
-   ufuncs.bitwise_right_shift
-   ufuncs.bitwise_xor
-   ufuncs.copysign
-   ufuncs.divide
-   ufuncs.equal
-   ufuncs.float_power
-   ufuncs.floor_divide
-   ufuncs.fmax
-   ufuncs.fmin
-   ufuncs.fmod
-   ufuncs.gcd
-   ufuncs.greater
-   ufuncs.greater_equal
-   ufuncs.heaviside
-   ufuncs.hypot
-   ufuncs.lcm
-   ufuncs.ldexp
-   ufuncs.left_shift
-   ufuncs.less
-   ufuncs.less_equal
-   ufuncs.logaddexp
-   ufuncs.logaddexp2
-   ufuncs.logical_and
-   ufuncs.logical_or
-   ufuncs.logical_xor
-   ufuncs.maximum
-   ufuncs.minimum
-   ufuncs.mod
-   ufuncs.multiply
-   ufuncs.nextafter
-   ufuncs.not_equal
-   ufuncs.pow
-   ufuncs.power
-   ufuncs.remainder
-   ufuncs.right_shift
-   ufuncs.subtract
-   ufuncs.true_divide
-   ufuncs.angle
-   ufuncs.isreal
-   ufuncs.iscomplex
-
-IO / Conversion
-===============
-
-Dataset methods
----------------
-
-.. autosummary::
-   :toctree: generated/
-
-   load_dataset
-   open_dataset
-   open_mfdataset
-   open_zarr
-   save_mfdataset
-   Dataset.as_numpy
-   Dataset.from_dataframe
-   Dataset.from_dict
-   Dataset.to_dataarray
-   Dataset.to_dataframe
-   Dataset.to_dask_dataframe
-   Dataset.to_dict
-   Dataset.to_netcdf
-   Dataset.to_pandas
-   Dataset.to_zarr
-   Dataset.chunk
-   Dataset.close
-   Dataset.compute
-   Dataset.filter_by_attrs
-   Dataset.info
-   Dataset.load
-   Dataset.persist
-   Dataset.unify_chunks
-
-DataArray methods
------------------
-
-.. autosummary::
-   :toctree: generated/
-
-   load_dataarray
-   open_dataarray
-   DataArray.as_numpy
-   DataArray.from_dict
-   DataArray.from_iris
-   DataArray.from_series
-   DataArray.to_dask_dataframe
-   DataArray.to_dataframe
-   DataArray.to_dataset
-   DataArray.to_dict
-   DataArray.to_index
-   DataArray.to_iris
-   DataArray.to_masked_array
-   DataArray.to_netcdf
-   DataArray.to_numpy
-   DataArray.to_pandas
-   DataArray.to_series
-   DataArray.to_zarr
-   DataArray.chunk
-   DataArray.close
-   DataArray.compute
-   DataArray.persist
-   DataArray.load
-   DataArray.unify_chunks
-
-DataTree methods
-----------------
-
-.. autosummary::
-   :toctree: generated/
-
-   open_datatree
-   open_groups
-   DataTree.to_dict
-   DataTree.to_netcdf
-   DataTree.to_zarr
-   DataTree.chunk
-   DataTree.load
-   DataTree.compute
-   DataTree.persist
-
-.. ..
-
-..    Missing:
-..    ``open_mfdatatree``
-
-Encoding/Decoding
-=================
-
-Coder objects
--------------
-
-.. autosummary::
-   :toctree: generated/
-
-   coders.CFDatetimeCoder
-
-Plotting
-========
-
-Dataset
--------
-
-.. autosummary::
-   :toctree: generated/
-   :template: autosummary/accessor_method.rst
-
-   Dataset.plot.scatter
-   Dataset.plot.quiver
-   Dataset.plot.streamplot
-
-DataArray
----------
-
-.. autosummary::
-   :toctree: generated/
-   :template: autosummary/accessor_callable.rst
-
-   DataArray.plot
-
-.. autosummary::
-   :toctree: generated/
-   :template: autosummary/accessor_method.rst
-
-   DataArray.plot.contourf
-   DataArray.plot.contour
-   DataArray.plot.hist
-   DataArray.plot.imshow
-   DataArray.plot.line
-   DataArray.plot.pcolormesh
-   DataArray.plot.step
-   DataArray.plot.scatter
-   DataArray.plot.surface
-
-
-Faceting
---------
-.. autosummary::
-   :toctree: generated/
-
-   plot.FacetGrid
-   plot.FacetGrid.add_colorbar
-   plot.FacetGrid.add_legend
-   plot.FacetGrid.add_quiverkey
-   plot.FacetGrid.map
-   plot.FacetGrid.map_dataarray
-   plot.FacetGrid.map_dataarray_line
-   plot.FacetGrid.map_dataset
-   plot.FacetGrid.map_plot1d
-   plot.FacetGrid.set_axis_labels
-   plot.FacetGrid.set_ticks
-   plot.FacetGrid.set_titles
-   plot.FacetGrid.set_xlabels
-   plot.FacetGrid.set_ylabels
-
-
-
-GroupBy objects
-===============
-
-.. currentmodule:: xarray.core.groupby
-
-Dataset
--------
-
-.. autosummary::
-   :toctree: generated/
-
-   DatasetGroupBy
-   DatasetGroupBy.map
-   DatasetGroupBy.reduce
-   DatasetGroupBy.assign
-   DatasetGroupBy.assign_coords
-   DatasetGroupBy.first
-   DatasetGroupBy.last
-   DatasetGroupBy.fillna
-   DatasetGroupBy.quantile
-   DatasetGroupBy.where
-   DatasetGroupBy.all
-   DatasetGroupBy.any
-   DatasetGroupBy.count
-   DatasetGroupBy.cumsum
-   DatasetGroupBy.cumprod
-   DatasetGroupBy.max
-   DatasetGroupBy.mean
-   DatasetGroupBy.median
-   DatasetGroupBy.min
-   DatasetGroupBy.prod
-   DatasetGroupBy.std
-   DatasetGroupBy.sum
-   DatasetGroupBy.var
-   DatasetGroupBy.dims
-   DatasetGroupBy.groups
-   DatasetGroupBy.shuffle_to_chunks
-
-DataArray
----------
-
-.. autosummary::
-   :toctree: generated/
-
-   DataArrayGroupBy
-   DataArrayGroupBy.map
-   DataArrayGroupBy.reduce
-   DataArrayGroupBy.assign_coords
-   DataArrayGroupBy.first
-   DataArrayGroupBy.last
-   DataArrayGroupBy.fillna
-   DataArrayGroupBy.quantile
-   DataArrayGroupBy.where
-   DataArrayGroupBy.all
-   DataArrayGroupBy.any
-   DataArrayGroupBy.count
-   DataArrayGroupBy.cumsum
-   DataArrayGroupBy.cumprod
-   DataArrayGroupBy.max
-   DataArrayGroupBy.mean
-   DataArrayGroupBy.median
-   DataArrayGroupBy.min
-   DataArrayGroupBy.prod
-   DataArrayGroupBy.std
-   DataArrayGroupBy.sum
-   DataArrayGroupBy.var
-   DataArrayGroupBy.dims
-   DataArrayGroupBy.groups
-   DataArrayGroupBy.shuffle_to_chunks
-
-Grouper Objects
----------------
-
-.. currentmodule:: xarray
-
-.. autosummary::
-   :toctree: generated/
-
-   groupers.BinGrouper
-   groupers.UniqueGrouper
-   groupers.TimeResampler
-
-
-Rolling objects
-===============
-
-.. currentmodule:: xarray.computation.rolling
-
-Dataset
--------
-
-.. autosummary::
-   :toctree: generated/
-
-   DatasetRolling
-   DatasetRolling.construct
-   DatasetRolling.reduce
-   DatasetRolling.argmax
-   DatasetRolling.argmin
-   DatasetRolling.count
-   DatasetRolling.max
-   DatasetRolling.mean
-   DatasetRolling.median
-   DatasetRolling.min
-   DatasetRolling.prod
-   DatasetRolling.std
-   DatasetRolling.sum
-   DatasetRolling.var
-
-DataArray
----------
-
-.. autosummary::
-   :toctree: generated/
-
-   DataArrayRolling
-   DataArrayRolling.__iter__
-   DataArrayRolling.construct
-   DataArrayRolling.reduce
-   DataArrayRolling.argmax
-   DataArrayRolling.argmin
-   DataArrayRolling.count
-   DataArrayRolling.max
-   DataArrayRolling.mean
-   DataArrayRolling.median
-   DataArrayRolling.min
-   DataArrayRolling.prod
-   DataArrayRolling.std
-   DataArrayRolling.sum
-   DataArrayRolling.var
-
-Coarsen objects
-===============
-
-Dataset
--------
-
-.. autosummary::
-   :toctree: generated/
-
-   DatasetCoarsen
-   DatasetCoarsen.all
-   DatasetCoarsen.any
-   DatasetCoarsen.construct
-   DatasetCoarsen.count
-   DatasetCoarsen.max
-   DatasetCoarsen.mean
-   DatasetCoarsen.median
-   DatasetCoarsen.min
-   DatasetCoarsen.prod
-   DatasetCoarsen.reduce
-   DatasetCoarsen.std
-   DatasetCoarsen.sum
-   DatasetCoarsen.var
-
-DataArray
----------
-
-.. autosummary::
-   :toctree: generated/
-
-   DataArrayCoarsen
-   DataArrayCoarsen.all
-   DataArrayCoarsen.any
-   DataArrayCoarsen.construct
-   DataArrayCoarsen.count
-   DataArrayCoarsen.max
-   DataArrayCoarsen.mean
-   DataArrayCoarsen.median
-   DataArrayCoarsen.min
-   DataArrayCoarsen.prod
-   DataArrayCoarsen.reduce
-   DataArrayCoarsen.std
-   DataArrayCoarsen.sum
-   DataArrayCoarsen.var
-
-Exponential rolling objects
-===========================
-
-.. currentmodule:: xarray.computation.rolling_exp
-
-.. autosummary::
-   :toctree: generated/
-
-   RollingExp
-   RollingExp.mean
-   RollingExp.sum
-
-Weighted objects
-================
-
-.. currentmodule:: xarray.computation.weighted
-
-Dataset
--------
-
-.. autosummary::
-   :toctree: generated/
-
-   DatasetWeighted
-   DatasetWeighted.mean
-   DatasetWeighted.quantile
-   DatasetWeighted.sum
-   DatasetWeighted.std
-   DatasetWeighted.var
-   DatasetWeighted.sum_of_weights
-   DatasetWeighted.sum_of_squares
-
-DataArray
----------
-
-.. autosummary::
-   :toctree: generated/
-
-   DataArrayWeighted
-   DataArrayWeighted.mean
-   DataArrayWeighted.quantile
-   DataArrayWeighted.sum
-   DataArrayWeighted.std
-   DataArrayWeighted.var
-   DataArrayWeighted.sum_of_weights
-   DataArrayWeighted.sum_of_squares
-
-Resample objects
-================
-
-.. currentmodule:: xarray.core.resample
-
-Dataset
--------
-
-.. autosummary::
-   :toctree: generated/
-
-   DatasetResample
-   DatasetResample.asfreq
-   DatasetResample.backfill
-   DatasetResample.interpolate
-   DatasetResample.nearest
-   DatasetResample.pad
-   DatasetResample.all
-   DatasetResample.any
-   DatasetResample.apply
-   DatasetResample.assign
-   DatasetResample.assign_coords
-   DatasetResample.bfill
-   DatasetResample.count
-   DatasetResample.ffill
-   DatasetResample.fillna
-   DatasetResample.first
-   DatasetResample.last
-   DatasetResample.map
-   DatasetResample.max
-   DatasetResample.mean
-   DatasetResample.median
-   DatasetResample.min
-   DatasetResample.prod
-   DatasetResample.quantile
-   DatasetResample.reduce
-   DatasetResample.std
-   DatasetResample.sum
-   DatasetResample.var
-   DatasetResample.where
-   DatasetResample.dims
-   DatasetResample.groups
-
-
-DataArray
----------
-
-.. autosummary::
-   :toctree: generated/
-
-   DataArrayResample
-   DataArrayResample.asfreq
-   DataArrayResample.backfill
-   DataArrayResample.interpolate
-   DataArrayResample.nearest
-   DataArrayResample.pad
-   DataArrayResample.all
-   DataArrayResample.any
-   DataArrayResample.apply
-   DataArrayResample.assign_coords
-   DataArrayResample.bfill
-   DataArrayResample.count
-   DataArrayResample.ffill
-   DataArrayResample.fillna
-   DataArrayResample.first
-   DataArrayResample.last
-   DataArrayResample.map
-   DataArrayResample.max
-   DataArrayResample.mean
-   DataArrayResample.median
-   DataArrayResample.min
-   DataArrayResample.prod
-   DataArrayResample.quantile
-   DataArrayResample.reduce
-   DataArrayResample.std
-   DataArrayResample.sum
-   DataArrayResample.var
-   DataArrayResample.where
-   DataArrayResample.dims
-   DataArrayResample.groups
-
-Accessors
-=========
-
-.. currentmodule:: xarray.core
-
-.. autosummary::
-   :toctree: generated/
-
-   accessor_dt.DatetimeAccessor
-   accessor_dt.TimedeltaAccessor
-   accessor_str.StringAccessor
-
-
-Custom Indexes
-==============
-.. currentmodule:: xarray
-
-.. autosummary::
-   :toctree: generated/
-
-   CFTimeIndex
-
-Creating custom indexes
------------------------
-.. autosummary::
-   :toctree: generated/
-
-   cftime_range
-   date_range
-   date_range_like
-
-Tutorial
-========
-
-.. autosummary::
-   :toctree: generated/
-
-   tutorial.open_dataset
-   tutorial.load_dataset
-   tutorial.open_datatree
-   tutorial.load_datatree
-
-Testing
-=======
-
-.. autosummary::
-   :toctree: generated/
-
-   testing.assert_equal
-   testing.assert_identical
-   testing.assert_allclose
-   testing.assert_chunks_equal
-
-Test that two ``DataTree`` objects are similar.
-
-.. autosummary::
-   :toctree: generated/
-
-   testing.assert_isomorphic
-   testing.assert_equal
-   testing.assert_identical
-
-Hypothesis Testing Strategies
-=============================
-
-.. currentmodule:: xarray
-
-See the :ref:`documentation page on testing <testing.hypothesis>` for a guide on how to use these strategies.
-
-.. warning::
-    These strategies should be considered highly experimental, and liable to change at any time.
-
-.. autosummary::
-   :toctree: generated/
-
-   testing.strategies.supported_dtypes
-   testing.strategies.names
-   testing.strategies.dimension_names
-   testing.strategies.dimension_sizes
-   testing.strategies.attrs
-   testing.strategies.variables
-   testing.strategies.unique_subset_of
-
-Exceptions
-==========
-
-.. autosummary::
-   :toctree: generated/
-
-   MergeError
-   SerializationWarning
-
-DataTree
---------
-
-Exceptions raised when manipulating trees.
-
-.. autosummary::
-   :toctree: generated/
-
-   xarray.TreeIsomorphismError
-   xarray.InvalidTreeError
-   xarray.NotFoundInTreeError
-
-Advanced API
-============
-
-.. autosummary::
-   :toctree: generated/
-
-   Coordinates
-   Dataset.variables
-   DataArray.variable
-   DataTree.variables
-   Variable
-   IndexVariable
-   as_variable
-   Index
-   IndexSelResult
-   Context
-   register_dataset_accessor
-   register_dataarray_accessor
-   register_datatree_accessor
-   Dataset.set_close
-   backends.BackendArray
-   backends.BackendEntrypoint
-   backends.list_engines
-   backends.refresh_engines
-
-.. ..
-
-..    Missing:
-..    ``DataTree.set_close``
-
-Default, pandas-backed indexes built-in Xarray:
-
-   indexes.PandasIndex
-   indexes.PandasMultiIndex
-
-These backends provide a low-level interface for lazily loading data from
-external file-formats or protocols, and can be manually invoked to create
-arguments for the ``load_store`` and ``dump_to_store`` Dataset methods:
-
-.. autosummary::
-   :toctree: generated/
-
-   backends.NetCDF4DataStore
-   backends.H5NetCDFStore
-   backends.PydapDataStore
-   backends.ScipyDataStore
-   backends.ZarrStore
-   backends.FileManager
-   backends.CachingFileManager
-   backends.DummyFileManager
-
-These BackendEntrypoints provide a basic interface to the most commonly
-used filetypes in the xarray universe.
-
-.. autosummary::
-   :toctree: generated/
-
-   backends.NetCDF4BackendEntrypoint
-   backends.H5netcdfBackendEntrypoint
-   backends.PydapBackendEntrypoint
-   backends.ScipyBackendEntrypoint
-   backends.StoreBackendEntrypoint
-   backends.ZarrBackendEntrypoint
-
-Deprecated / Pending Deprecation
-================================
-
-.. autosummary::
-   :toctree: generated/
-
-   Dataset.drop
-   DataArray.drop
-   Dataset.apply
-   core.groupby.DataArrayGroupBy.apply
-   core.groupby.DatasetGroupBy.apply
-
-.. autosummary::
-   :toctree: generated/
-   :template: autosummary/accessor_attribute.rst
-
-   DataArray.dt.weekofyear
-   DataArray.dt.week
+   api/top-level
+   api/dataset
+   api/dataarray
+   api/datatree
+   api/coordinates
+   api/indexes
+   api/ufuncs
+   api/io
+   api/encoding
+   api/plotting
+   api/groupby
+   api/rolling
+   api/coarsen
+   api/rolling-exp
+   api/weighted
+   api/resample
+   api/accessors
+   api/tutorial
+   api/testing
+   api/backends
+   api/exceptions
+   api/advanced
+   api/deprecated
diff -pruN 2025.03.1-8/doc/badge.json 2025.10.1-1/doc/badge.json
--- 2025.03.1-8/doc/badge.json	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/badge.json	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,8 @@
+{
+  "label": "",
+  "message": "xarray",
+  "logoSvg": "<svg xml:space=\"preserve\" style=\"max-height: 500px\" viewBox=\"24 57.6 553.2 493.2\" y=\"0px\" x=\"0px\" xmlns:xlink=\"http://www.w3.org/1999/xlink\" xmlns=\"http://www.w3.org/2000/svg\" id=\"Layer_1\" version=\"1.1\" width=\"553.2\" height=\"493.2\"><style type=\"text/css\">.st0{fill:#216C89;}.st1{fill:#4993AA;}.st2{fill:#0F4565;}.st3{fill:#6BE8E8;}.st4{fill:#9DEEF4;}.st5{fill:#4ACFDD;}.st6{fill:#E38017;}.st7{fill:#16AFB5;}</style><g><g><polygon points=\"266.62,546.18 356.1,454.54 356.1,271.27 266.62,362.9\" class=\"st0\"/><polygon points=\"356.1,271.45 114.48,271.45 25,362.9 266.62,362.9\" class=\"st1\"/><rect height=\"183.27\" width=\"241.62\" class=\"st2\" y=\"362.9\" x=\"25\"/></g><g><polygon points=\"266.62,328.73 356.1,237.1 356.1,53.82 266.62,145.46\" class=\"st3\"/><polygon points=\"356.1,54 114.48,54 25,145.46 266.62,145.46\" class=\"st4\"/><rect height=\"183.27\" width=\"241.62\" class=\"st5\" y=\"145.46\" x=\"25\"/></g><polygon points=\"467.47,452.33 374.48,546.18 374.48,362.9 467.47,269.05\" class=\"st6\"/><polygon points=\"575,452.33 482.01,546.18 482.01,362.9 575,269.05\" class=\"st7\"/></g></svg>",
+  "logoWidth": 14,
+  "labelColor": "#4a4a4a",
+  "color": "#0e4666"
+}
diff -pruN 2025.03.1-8/doc/combined.json 2025.10.1-1/doc/combined.json
--- 2025.03.1-8/doc/combined.json	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/combined.json	2025-10-10 10:38:05.000000000 +0000
@@ -10,9 +10,6 @@
     "x/0": ["saved_on_disk.h5", 8352, 32],
     "y/.zarray": "{\"chunks\":[5],\"compressor\":null,\"dtype\":\"<i8\",\"fill_value\":null,\"filters\":null,\"order\":\"C\",\"shape\":[5],\"zarr_format\":2}",
     "y/.zattrs": "{\"_ARRAY_DIMENSIONS\":[\"y\"],\"calendar\":\"proleptic_gregorian\",\"units\":\"days since 2000-01-01 00:00:00\"}",
-    "y/0": ["saved_on_disk.h5", 8384, 40],
-    "z/.zarray": "{\"chunks\":[4],\"compressor\":null,\"dtype\":\"|O\",\"fill_value\":null,\"filters\":[{\"allow_nan\":true,\"check_circular\":true,\"encoding\":\"utf-8\",\"ensure_ascii\":true,\"id\":\"json2\",\"indent\":null,\"separators\":[\",\",\":\"],\"skipkeys\":false,\"sort_keys\":true,\"strict\":true}],\"order\":\"C\",\"shape\":[4],\"zarr_format\":2}",
-    "z/0": "[\"a\",\"b\",\"c\",\"d\",\"|O\",[4]]",
-    "z/.zattrs": "{\"_ARRAY_DIMENSIONS\":[\"x\"]}"
+    "y/0": ["saved_on_disk.h5", 8384, 40]
   }
 }
diff -pruN 2025.03.1-8/doc/conf.py 2025.10.1-1/doc/conf.py
--- 2025.03.1-8/doc/conf.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/conf.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,17 +1,3 @@
-#
-# xarray documentation build configuration file, created by
-# sphinx-quickstart on Thu Feb  6 18:57:54 2014.
-#
-# This file is execfile()d with the current directory set to its
-# containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-
 import datetime
 import inspect
 import os
@@ -20,7 +6,7 @@ import subprocess
 import sys
 from contextlib import suppress
 from textwrap import dedent, indent
-
+import packaging.version
 import sphinx_autosummary_accessors
 import yaml
 from sphinx.application import Sphinx
@@ -58,9 +44,6 @@ except ImportError:
         ]
     )
 
-nbsphinx_allow_errors = False
-nbsphinx_requirejs_path = ""
-
 # -- General configuration ------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
@@ -78,8 +61,7 @@ extensions = [
     "sphinx.ext.extlinks",
     "sphinx.ext.mathjax",
     "sphinx.ext.napoleon",
-    "IPython.sphinxext.ipython_directive",
-    "IPython.sphinxext.ipython_console_highlighting",
+    "jupyter_sphinx",
     "nbsphinx",
     "sphinx_autosummary_accessors",
     "sphinx.ext.linkcode",
@@ -98,14 +80,25 @@ extlinks = {
     "discussion": ("https://github.com/pydata/xarray/discussions/%s", "D%s"),
 }
 
-# sphinx-copybutton configurations
+# sphinx-copybutton configuration
 copybutton_prompt_text = r">>> |\.\.\. |\$ |In \[\d*\]: | {2,5}\.{3,}: | {5,8}: "
 copybutton_prompt_is_regexp = True
 
-# nbsphinx configurations
-
+# NBSphinx configuration
 nbsphinx_timeout = 600
 nbsphinx_execute = "always"
+nbsphinx_allow_errors = False
+nbsphinx_requirejs_path = ""
+#  png2x/retina rendering of figures in docs would also require modifying custom.css:
+# https://github.com/spatialaudio/nbsphinx/issues/464#issuecomment-652729126
+#  .rst-content .image-reference img {
+#   max-width: unset;
+#   width: 100% !important;
+#   height: auto !important;
+#  }
+# nbsphinx_execute_arguments = [
+#     "--InlineBackend.figure_formats=['png2x']",
+# ]
 nbsphinx_prolog = """
 {% set docname = env.doc2path(env.docname, base=None) %}
 
@@ -115,11 +108,11 @@ You can run this notebook in a `live ses
    :target: https://mybinder.org/v2/gh/pydata/xarray/main?urlpath=lab/tree/doc/{{ docname }}
 """
 
+# AutoDoc configuration
 autosummary_generate = True
 autodoc_typehints = "none"
 
-# Napoleon configurations
-
+# Napoleon configuration
 napoleon_google_docstring = False
 napoleon_numpy_docstring = True
 napoleon_use_param = False
@@ -187,29 +180,28 @@ napoleon_type_aliases = {
     "pd.NaT": "~pandas.NaT",
 }
 
+autodoc_type_aliases = napoleon_type_aliases  # Keep both in sync
+
 # mermaid config
-mermaid_version = "10.9.1"
+mermaid_version = "11.6.0"
 
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ["_templates", sphinx_autosummary_accessors.templates_path]
 
-# The suffix of source filenames.
-# source_suffix = ".rst"
-
-
 # The master toctree document.
 master_doc = "index"
 
 remove_from_toctrees = ["generated/*"]
+# The language for content autogenerated by Sphinx.
+language = "en"
 
 # General information about the project.
 project = "xarray"
 copyright = f"2014-{datetime.datetime.now().year}, xarray Developers"
 
-# The short X.Y version.
-version = xarray.__version__.split("+")[0]
-# The full version, including alpha/beta/rc tags.
-release = xarray.__version__
+# The short Y.M.D version.
+v = packaging.version.parse(xarray.__version__)
+version = ".".join(str(p) for p in v.release)
 
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
@@ -219,7 +211,7 @@ today_fmt = "%Y-%m-%d"
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ["_build", "**.ipynb_checkpoints"]
+exclude_patterns = ["_build", "debug.ipynb", "**.ipynb_checkpoints"]
 
 
 # The name of the Pygments (syntax highlighting) style to use.
@@ -229,7 +221,7 @@ pygments_style = "sphinx"
 # -- Options for HTML output ----------------------------------------------
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
-html_theme = "sphinx_book_theme"
+html_theme = "pydata_sphinx_theme"
 html_title = ""
 
 html_context = {
@@ -239,29 +231,37 @@ html_context = {
     "doc_path": "doc",
 }
 
-# Theme options are theme-specific and customize the look and feel of a theme
-# further.  For a list of options available for each theme, see the
-# documentation.
-html_theme_options = dict(
-    # analytics_id=''  this is configured in rtfd.io
-    # canonical_url="",
-    repository_url="https://github.com/pydata/xarray",
-    repository_branch="main",
-    navigation_with_keys=False,  # pydata/pydata-sphinx-theme#1492
-    navigation_depth=4,
-    path_to_docs="doc",
-    use_edit_page_button=True,
-    use_repository_button=True,
-    use_issues_button=True,
-    home_page_in_toc=False,
-    extra_footer="""<p>Xarray is a fiscally sponsored project of <a href="https://numfocus.org">NumFOCUS</a>,
-    a nonprofit dedicated to supporting the open-source scientific computing community.<br>
-    Theme by the <a href="https://ebp.jupyterbook.org">Executable Book Project</a></p>""",
-    twitter_url="https://twitter.com/xarray_dev",
-    icon_links=[],  # workaround for pydata/pydata-sphinx-theme#1220
-    # announcement="<a href='https://forms.gle/KEq7WviCdz9xTaJX6'>Xarray's 2024 User Survey is live now. Please take ~5 minutes to fill it out and help us improve Xarray.</a>",
-)
-
+# https://pydata-sphinx-theme.readthedocs.io/en/stable/user_guide/layout.html#references
+html_theme_options = {
+    #"announcement":"🍾 <a href='https://github.com/pydata/xarray/discussions/8462'>Xarray is now 10 years old!</a> 🎉",
+    "logo": {"image_dark": "https://docs.xarray.dev/en/stable/_static/logos/Xarray_Logo_FullColor_InverseRGB_Final.svg"},
+    "github_url":"https://github.com/pydata/xarray",
+    "show_version_warning_banner":True,
+    "use_edit_page_button":True,
+    "header_links_before_dropdown": 8,
+    "navbar_align": "left",
+    "footer_center":["last-updated"],
+    # Instead of adding these to the header bar, they are linked in 'getting help' and 'contributing'
+    # "icon_links": [
+    # {
+    #     "name": "Discord",
+    #     "url": "https://discord.com/invite/wEKPCt4PDu",
+    #     "icon": "fa-brands fa-discord",
+    # },
+    # {
+    #     "name": "X",
+    #     "url": "https://x.com/xarray_dev",
+    #     "icon": "fa-brands fa-x-twitter",
+    # },
+    # {
+    #     "name": "Bluesky",
+    #     "url": "https://bsky.app/profile/xarray.bsky.social",
+    #     "icon": "fa-brands fa-bluesky",
+    # },
+    # ]
+}
+# The pydata_sphinx_theme edit-page button already links to GitHub, so hide Sphinx's own source links
+html_show_sourcelink = False
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
@@ -282,14 +282,13 @@ html_css_files = ["style.css"]
 # configuration for sphinxext.opengraph
 ogp_site_url = "https://docs.xarray.dev/en/latest/"
 ogp_image = "https://docs.xarray.dev/en/stable/_static/logos/Xarray_Logo_RGB_Final.png"
-ogp_custom_meta_tags = [
+ogp_custom_meta_tags = (
     '<meta name="twitter:card" content="summary_large_image" />',
     '<meta property="twitter:site" content="@xarray_dev" />',
     '<meta name="image" property="og:image" content="https://docs.xarray.dev/en/stable/_static/logos/Xarray_Logo_RGB_Final.png" />',
-]
+)
 
 # Redirects for pages that were moved to new locations
-
 rediraffe_redirects = {
     "terminology.rst": "user-guide/terminology.rst",
     "data-structures.rst": "user-guide/data-structures.rst",
@@ -306,23 +305,15 @@ rediraffe_redirects = {
     "dask.rst": "user-guide/dask.rst",
     "plotting.rst": "user-guide/plotting.rst",
     "duckarrays.rst": "user-guide/duckarrays.rst",
-    "related-projects.rst": "ecosystem.rst",
-    "faq.rst": "getting-started-guide/faq.rst",
+    "related-projects.rst": "user-guide/ecosystem.rst",
+    "faq.rst": "get-help/faq.rst",
     "why-xarray.rst": "getting-started-guide/why-xarray.rst",
     "installing.rst": "getting-started-guide/installing.rst",
     "quick-overview.rst": "getting-started-guide/quick-overview.rst",
+    "contributing.rst": "contribute/contributing.rst",
+    "developers-meeting.rst": "contribute/developers-meeting.rst",
 }
 
-# Sometimes the savefig directory doesn't exist and needs to be created
-# https://github.com/ipython/ipython/issues/8733
-# becomes obsolete when we can pin ipython>=5.2; see ci/requirements/doc.yml
-ipython_savefig_dir = os.path.join(
-    os.path.dirname(os.path.abspath(__file__)), "_build", "html", "_static"
-)
-if not os.path.exists(ipython_savefig_dir):
-    os.makedirs(ipython_savefig_dir)
-
-
 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
 html_last_updated_fmt = today_fmt
@@ -348,9 +339,9 @@ intersphinx_mapping = {
     "sparse": ("https://sparse.pydata.org/en/latest/", None),
     "xarray-tutorial": ("https://tutorial.xarray.dev/", None),
     "zarr": ("https://zarr.readthedocs.io/en/stable/", None),
+    "xarray-lmfit": ("https://xarray-lmfit.readthedocs.io/stable", None),
 }
 
-
 # based on numpy doc/source/conf.py
 def linkcode_resolve(domain, info):
     """
diff -pruN 2025.03.1-8/doc/contribute/contributing.rst 2025.10.1-1/doc/contribute/contributing.rst
--- 2025.03.1-8/doc/contribute/contributing.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/contribute/contributing.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,1082 @@
+.. _contributing:
+
+**********************
+Contributing to xarray
+**********************
+
+.. note::
+
+  Large parts of this document came from the `Pandas Contributing
+  Guide <https://pandas.pydata.org/pandas-docs/stable/development/contributing.html>`_.
+
+Overview
+========
+
+We welcome your skills and enthusiasm at the xarray project! There are numerous opportunities to
+contribute beyond just writing code.
+All contributions, including bug reports, bug fixes, documentation improvements, enhancement suggestions,
+and other ideas are welcome.
+
+If you have any questions about the process or how to fix something, feel free to ask us!
+The recommended place to ask a question is on `GitHub Discussions <https://github.com/pydata/xarray/discussions>`_,
+but we also have a `Discord <https://discord.com/invite/wEKPCt4PDu>`_ and a
+`mailing list <https://groups.google.com/g/xarray>`_. There is also a
+`"python-xarray" tag on Stack Overflow <https://stackoverflow.com/questions/tagged/python-xarray>`_ which we monitor for questions.
+
+We also have a biweekly community call, details of which are announced on the
+`Developers meeting <https://docs.xarray.dev/en/stable/developers-meeting.html>`_.
+You are very welcome to join! Though we would love to hear from you, there is no expectation to
+contribute during the meeting either - you are always welcome to just sit in and listen.
+
+This project is a community effort, and everyone is welcome to contribute. Everyone within the community
+is expected to abide by our `code of conduct <https://github.com/pydata/xarray/blob/main/CODE_OF_CONDUCT.md>`_.
+
+Where to start?
+===============
+
+If you are brand new to *xarray* or open-source development, we recommend going
+through the `GitHub "issues" tab <https://github.com/pydata/xarray/issues>`_
+to find issues that interest you.
+Issues labelled `Documentation <https://github.com/pydata/xarray/labels/topic-documentation>`__
+and `good first issue
+<https://github.com/pydata/xarray/labels/contrib-good-first-issue>`_ are particularly suited for
+new contributors and are a good place to start out.
+These are well-documented issues that do not require a deep understanding of the internals of xarray.
+
+Once you've found an interesting issue, you can return here to get your development environment set up.
+The xarray project does not assign issues. Issues are "assigned" by opening a Pull Request (PR).
+
+.. _contributing.bug_reports:
+
+Bug reports and enhancement requests
+====================================
+
+Bug reports are an important part of making *xarray* more stable. Having a complete bug
+report will allow others to reproduce the bug and provide insight into fixing it.
+
+Trying out the bug-producing code on the *main* branch is often a worthwhile exercise
+to confirm that the bug still exists. It is also worth searching existing bug reports and
+pull requests to see if the issue has already been reported and/or fixed.
+
+Submitting a bug report
+-----------------------
+
+If you find a bug in the code or documentation, do not hesitate to submit a ticket to the
+`Issue Tracker <https://github.com/pydata/xarray/issues>`_.
+You are also welcome to post feature requests or pull requests.
+
+If you are reporting a bug, please use the provided template which includes the following:
+
+#. Include a short, self-contained Python snippet reproducing the problem.
+   You can format the code nicely by using `GitHub Flavored Markdown
+   <https://github.github.com/github-flavored-markdown/>`_::
+
+      ```python
+      import xarray as xr
+
+      ds = xr.Dataset(...)
+
+      ...
+      ```
+
+#. Include the full version string of *xarray* and its dependencies. You can use the
+   built-in function::
+
+      ```python
+      import xarray as xr
+
+      xr.show_versions()
+
+      ...
+      ```
+
+#. Explain why the current behavior is wrong/not desired and what you expect instead.
+
+The issue will then show up to the *xarray* community and be open to comments/ideas from others.
+
+See this `Stack Overflow article for tips on writing a good bug report <https://stackoverflow.com/help/mcve>`_.
+
+
+.. _contributing.github:
+
+Now that you have an issue you want to fix, enhancement to add, or documentation
+to improve, you need to learn how to work with GitHub and the *xarray* code base.
+
+.. _contributing.version_control:
+
+Version control, Git, and GitHub
+================================
+
+The code is hosted on `GitHub <https://www.github.com/pydata/xarray>`_. To
+contribute you will need to sign up for a `free GitHub account
+<https://github.com/signup/free>`_. We use `Git <https://git-scm.com/>`_ for
+version control to allow many people to work together on the project.
+
+Some great resources for learning Git:
+
+* the `GitHub help pages <https://help.github.com/>`_.
+* the `NumPy documentation <https://numpy.org/doc/stable/dev/index.html>`_.
+* Matthew Brett's `Pydagogue <https://matthew-brett.github.io/pydagogue/>`_.
+
+Getting started with Git
+------------------------
+
+`GitHub has instructions for setting up Git <https://help.github.com/set-up-git-redirect>`__ including installing git,
+setting up your SSH key, and configuring git.  All these steps need to be completed before
+you can work seamlessly between your local repository and GitHub.
+
+.. note::
+
+    The following instructions assume you want to learn how to interact with GitHub via the ``git`` command-line utility,
+    but contributors who are new to git may find it easier to use other tools such as
+    `GitHub Desktop <https://desktop.github.com/>`_.
+
+.. _contributing.dev_workflow:
+
+Development workflow
+====================
+
+To keep your work well organized and its history readable, and in turn to make it easier for project
+maintainers to see what you've done and why you did it, we recommend that you follow this workflow:
+
+1. `Create an account <https://github.com/>`_ on GitHub if you do not already have one.
+
+2. You will need your own fork to work on the code. Go to the `xarray project
+   page <https://github.com/pydata/xarray>`_ and hit the ``Fork`` button near the top of the page.
+   This creates a copy of the code under your account on the GitHub server.
+
+3. Clone your fork to your machine::
+
+    git clone https://github.com/your-user-name/xarray.git
+    cd xarray
+    git remote add upstream https://github.com/pydata/xarray.git
+
+   This creates the directory ``xarray`` and connects your repository to
+   the upstream (main project) *xarray* repository.
+
+4. Copy tags across from the xarray repository::
+
+    git fetch --tags upstream
+
+   This ensures that a reasonable version number is generated when you create a development environment.
+
+.. _contributing.dev_env:
+
+Creating a development environment
+----------------------------------
+
+To test out code changes locally, you'll need to build *xarray* from source, which
+requires a Python environment. If you're making documentation changes, you can
+skip to :ref:`contributing.documentation` but you won't be able to build the
+documentation locally before pushing your changes.
+
+.. note::
+
+    For small changes, such as fixing a typo, you don't necessarily need to build and test xarray locally.
+    If you make your changes then :ref:`commit and push them to a new branch <contributing.changes>`,
+    xarray's automated :ref:`continuous integration tests <contributing.ci>` will run and check your code in various ways.
+    You can then try to fix these problems by committing and pushing more commits to the same branch.
+
+    You can also avoid building the documentation locally by instead :ref:`viewing the updated documentation via the CI <contributing.pr>`.
+
+    To speed up this feedback loop or for more complex development tasks you should build and test xarray locally.
+
+
+.. _contributing.dev_python:
+
+Creating a Python Environment
+-----------------------------
+
+Before starting any development, you'll need to create an isolated xarray
+development environment:
+
+- Install either `Anaconda <https://www.anaconda.com/download/>`_ or `miniconda
+  <https://conda.io/miniconda.html>`_
+- Make sure your conda is up to date (``conda update conda``)
+- Make sure that you have :ref:`cloned the repository <contributing.dev_workflow>`
+- ``cd`` to the *xarray* source directory
+
+We'll now kick off a two-step process:
+
+1. Install the build dependencies
+2. Build and install xarray
+
+.. code-block:: sh
+
+   # Create and activate the build environment
+   conda create -c conda-forge -n xarray-tests python=3.11
+
+   # This is for Linux and MacOS
+   conda env update -f ci/requirements/environment.yml
+
+   # On windows, use environment-windows.yml instead
+   conda env update -f ci/requirements/environment-windows.yml
+
+   conda activate xarray-tests
+
+   # or with older versions of Anaconda:
+   source activate xarray-tests
+
+   # Build and install xarray
+   pip install -e .
+
+At this point you should be able to import *xarray* from your locally
+built version:
+
+.. code-block:: sh
+
+   $ python  # start an interpreter
+   >>> import xarray
+   >>> xarray.__version__
+   '2025.7.2.dev14+g5ce69b2b.d20250725'
+
+This creates a new environment without touching any of your existing environments
+or any existing Python installation.
+
+To view your environments::
+
+      conda info -e
+
+To return to your root environment::
+
+      conda deactivate
+
+See the full `conda docs here <https://conda.pydata.org/docs>`__.
+
+Install pre-commit hooks
+------------------------
+
+We highly recommend that you set up `pre-commit <https://pre-commit.com/>`_ hooks to automatically
+run xarray's linting and formatting tools every time you make a git commit. To install the hooks::
+
+    python -m pip install pre-commit
+    pre-commit install
+
+To run the checks manually on your staged files, run::
+
+    pre-commit run
+
+from the root of the xarray repository. You can skip the pre-commit checks with
+``git commit --no-verify``.
+
+
+Update the ``main`` branch
+--------------------------
+
+First make sure you have :ref:`created a development environment <contributing.dev_env>`.
+
+Before starting a new set of changes, fetch all changes from ``upstream/main``, and start a new
+feature branch from that. From time to time you should fetch the upstream changes from GitHub: ::
+
+    git fetch --tags upstream
+    git merge upstream/main
+
+This will combine your commits with the latest *xarray* git ``main``.  If this
+leads to merge conflicts, you must resolve these before submitting your pull
+request.  If you have uncommitted changes, you will need to ``git stash`` them
+prior to updating.  This will effectively store your changes, which can be
+reapplied after updating.
+
+If the *xarray* ``main`` branch version has updated since you last fetched changes,
+you may also wish to reinstall xarray so that the pip version reflects the *xarray*
+version::
+
+    pip install -e .
+
+Create a new feature branch
+---------------------------
+
+Create a branch to save your changes, even before you start making changes. You want your
+``main`` branch to contain only production-ready code::
+
+    git checkout -b shiny-new-feature
+
+This changes your working directory to the ``shiny-new-feature`` branch.  Keep any changes in this
+branch specific to one bug or feature so it is clear what the branch brings to *xarray*. You can have
+many "shiny-new-features" and switch in between them using the ``git checkout`` command.
+
+Generally, you will want to keep your feature branches on your public GitHub fork of xarray. To do this,
+you ``git push`` this new branch up to your GitHub repo. Generally (if you followed the instructions in
+these pages, and by default), git will have a link to your fork of the GitHub repo, called ``origin``.
+You push up to your own fork with: ::
+
+    git push origin shiny-new-feature
+
+In git >= 1.7 you can ensure that the link is correctly set by using the ``--set-upstream`` option: ::
+
+    git push --set-upstream origin shiny-new-feature
+
+From now on git will know that ``shiny-new-feature`` is related to the ``shiny-new-feature`` branch in the GitHub repo.
+
+The editing workflow
+--------------------
+
+1. Make some changes
+
+2. See which files have changed with ``git status``. You'll see a listing like this one: ::
+
+    # On branch shiny-new-feature
+    # Changed but not updated:
+    #   (use "git add <file>..." to update what will be committed)
+    #   (use "git checkout -- <file>..." to discard changes in working directory)
+    #
+    #  modified:   README
+
+3. Check what the actual changes are with ``git diff``.
+
+4. For documentation changes, `build the documentation
+   <https://docs.xarray.dev/en/stable/contributing.html#building-the-documentation>`__.
+
+5. `Run the test suite <https://docs.xarray.dev/en/stable/contributing.html#running-the-test-suite>`_ for code changes.
+
+Commit and push your changes
+----------------------------
+
+1. To commit all modified files into the local copy of your repo, do ``git commit -am 'A commit message'``.
+
+2. To push the changes up to your forked repo on GitHub, do a ``git push``.
+
+Open a pull request
+-------------------
+
+When you're ready or need feedback on your code, open a Pull Request (PR) so that the xarray developers can
+give feedback and eventually include your suggested code into the ``main`` branch.
+`Pull requests (PRs) on GitHub <https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-pull-requests>`_
+are the mechanism for contributing to xarray's code and documentation.
+
+Enter a title for the set of changes with some explanation of what you've done.
+Follow the PR template, which looks like this::
+
+    [ ] Closes #xxxx
+    [ ] Tests added
+    [ ] User visible changes (including notable bug fixes) are documented in whats-new.rst
+    [ ] New functions/methods are listed in api.rst
+
+Mention anything you'd like particular attention for, such as a complicated change or some code you are not happy with.
+If you don't think your request is ready to be merged, just say so in your pull request message and use
+the "Draft PR" feature of GitHub. This is a good way of getting some preliminary code review.
+
+.. _contributing.documentation:
+
+Contributing to the documentation
+=================================
+
+If you're not the developer type, contributing to the documentation is still of
+huge value. You don't even have to be an expert on *xarray* to do so! In fact,
+there are sections of the docs that are worse off after being written by
+experts. If something in the docs doesn't make sense to you, updating the
+relevant section after you figure it out is a great way to ensure it will help
+the next person.
+
+.. contents:: Documentation:
+   :local:
+
+
+About the *xarray* documentation
+--------------------------------
+
+The documentation is written in **reStructuredText**, which is almost like writing
+in plain English, and built using `Sphinx <https://www.sphinx-doc.org/>`__. The
+Sphinx Documentation has an excellent `introduction to reST
+<https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html>`__. Review the Sphinx docs to perform more
+complex changes to the documentation as well.
+
+Some other important things to know about the docs:
+
+- The *xarray* documentation consists of two parts: the docstrings in the code
+  itself and the docs in this folder ``xarray/doc/``.
+
+  The docstrings are meant to provide a clear explanation of the usage of the
+  individual functions, while the documentation in this folder consists of
+  tutorial-like overviews per topic together with some other information
+  (what's new, installation, etc).
+
+- The docstrings follow the **NumPy Docstring Standard**, which is used widely
+  in the Scientific Python community. This standard specifies the format of
+  the different sections of the docstring. Refer to the `documentation for the NumPy docstring format
+  <https://numpydoc.readthedocs.io/en/latest/format.html#docstring-standard>`_
+  for a detailed explanation, or look at some of the existing functions and
+  extend their docstrings in a similar manner (see also the short sketch after this list).
+
+- The documentation makes heavy use of the `jupyter-sphinx extension
+  <https://jupyter-sphinx.readthedocs.io>`_.
+  The ``jupyter-execute`` directive lets you put code in the documentation which will be run
+  during the doc build. For example:
+
+  .. code:: rst
+
+      .. jupyter-execute::
+
+          x = 2
+          x**3
+
+  will be rendered as:
+
+  .. jupyter-execute::
+
+       x = 2
+       x**3
+
+  Almost all code examples in the docs are run (and the output saved) during the
+  doc build. This approach means that code examples will always be up to date,
+  but it does make building the docs a bit more complex.
+
+- Our API documentation in ``doc/api.rst`` houses the auto-generated
+  documentation from the docstrings. For classes, there are a few subtleties
+  around controlling which methods and attributes have pages auto-generated.
+
+  Every method should be included in a ``toctree`` in ``api.rst``, else Sphinx
+  will emit a warning.
+
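+As a point of reference, here is a minimal sketch of a NumPy-style docstring
+(the function itself is hypothetical and only illustrates the section layout):
+
+.. code-block:: python
+
+    import numpy as np
+
+
+    def scale(values, factor=1.0):
+        """Multiply an array of values by a scaling factor.
+
+        Parameters
+        ----------
+        values : array_like
+            Input values to scale.
+        factor : float, optional
+            Scaling factor applied to ``values``, by default 1.0.
+
+        Returns
+        -------
+        numpy.ndarray
+            The scaled values.
+        """
+        # Convert to an ndarray so the example also works for lists and tuples.
+        return np.asarray(values) * factor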
+
+How to build the *xarray* documentation
+---------------------------------------
+
+Requirements
+~~~~~~~~~~~~
+Make sure to follow the instructions on :ref:`creating a development environment<contributing.dev_env>` above, but
+to build the docs you need to use the environment file ``ci/requirements/doc.yml``.
+You should also use this environment and these steps if you want to view changes you've made to the docstrings.
+
+.. code-block:: sh
+
+    # Create and activate the docs environment
+    conda env create -f ci/requirements/doc.yml
+    conda activate xarray-docs
+
+    # or with older versions of Anaconda:
+    source activate xarray-docs
+
+    # Build and install a local, editable version of xarray
+    pip install -e .
+
+Building the documentation
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To build the documentation run::
+
+    cd doc/
+    make html
+
+Then you can find the HTML output files in the folder ``xarray/doc/_build/html/``.
+
+To see what the documentation now looks like with your changes, you can view the HTML build locally by opening the files in your local browser.
+For example, if you normally use Google Chrome as your browser, you could enter::
+
+    google-chrome _build/html/quick-overview.html
+
+in the terminal, running from within the ``doc/`` folder.
+You should now see a new tab pop open in your local browser showing the ``quick-overview`` page of the documentation.
+The different pages of this local build of the documentation are linked together,
+so you can browse the whole documentation by following links the same way you would on the officially-hosted xarray docs site.
+
+The first time you build the docs, it will take quite a while because it has to run
+all the code examples and build all the generated docstring pages. In subsequent
+invocations, Sphinx will try to only build the pages that have been modified.
+
+If you want to do a full clean build, do::
+
+    make clean
+    make html
+
+Writing ReST pages
+------------------
+
+Most documentation is either in the docstrings of individual classes and methods, in explicit
+``.rst`` files, or in examples and tutorials. All of these use the
+`ReST <https://docutils.sourceforge.io/rst.html>`_ syntax and are processed by
+`Sphinx <https://www.sphinx-doc.org/en/master/>`_.
+
+This section contains additional information and conventions for how ReST is used in the
+xarray documentation.
+
+Section formatting
+~~~~~~~~~~~~~~~~~~
+
+We aim to follow the recommendations from the
+`Python documentation <https://devguide.python.org/documentation/start-documenting/index.html#sections>`_
+and the `Sphinx reStructuredText documentation <https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html#sections>`_
+for section markup characters (see the example after this list):
+
+- ``*`` with overline, for chapters
+
+- ``=``, for headings
+
+- ``-``, for sections
+
+- ``~``, for subsections
+
+- ``**`` text ``**``, for **bold** text
+
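+For example, a page following these conventions might be laid out like this (a
+schematic sketch with placeholder titles)::
+
+    ************
+    Page chapter
+    ************
+
+    Heading
+    =======
+
+    Section
+    -------
+
+    Subsection
+    ~~~~~~~~~~
+
+    Some **bold** text.
+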
+Referring to other documents and sections
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+`Sphinx <https://www.sphinx-doc.org/en/master/>`_ allows internal
+`references <https://www.sphinx-doc.org/en/master/usage/restructuredtext/roles.html>`_ between documents.
+
+Documents can be linked with the ``:doc:`` directive:
+
+::
+
+    See the :doc:`/getting-started-guide/installing`
+
+    See the :doc:`/getting-started-guide/quick-overview`
+
+will render as:
+
+See the `Installation <https://docs.xarray.dev/en/stable/getting-started-guide/installing.html>`_
+
+See the `Quick Overview <https://docs.xarray.dev/en/stable/getting-started-guide/quick-overview.html>`_
+
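+Sections that define a label, such as ``.. _contributing.dev_env:`` earlier in this
+document, can also be cross-referenced with the ``:ref:`` role, for example::
+
+    See :ref:`creating a development environment <contributing.dev_env>`.
+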
+Including figures and files
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Image files can be directly included in pages with the ``.. image::`` directive.
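+
+For example, this is how the continuous integration screenshot shown later on this
+page is embedded (relative paths are resolved against the file containing the
+directive)::
+
+    .. image:: ../_static/ci.png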
+
+.. _contributing.code:
+
+Contributing to the code base
+=============================
+
+.. contents:: Code Base:
+   :local:
+
+Code standards
+--------------
+
+Writing good code is not just about what you write. It is also about *how* you
+write it. During :ref:`Continuous Integration <contributing.ci>` testing, several
+tools will be run to check your code for stylistic errors.
+Generating any warnings will cause the test to fail.
+Thus, good style is a requirement for submitting code to *xarray*.
+
+In addition, because a lot of people use our library, it is important that we
+do not make sudden changes to the code that could break a lot of user code.
+That is, we need the library to be as *backwards compatible* as possible to
+avoid mass breakages.
+
+Code Formatting
+~~~~~~~~~~~~~~~
+
+xarray uses several tools to ensure a consistent code format throughout the project:
+
+- `ruff <https://github.com/astral-sh/ruff>`_ for formatting, code quality checks, and standardized import ordering, and
+- `mypy <https://mypy-lang.org/>`_ for static type checking on `type hints
+  <https://docs.python.org/3/library/typing.html>`_.
+
+We highly recommend that you set up `pre-commit hooks <https://pre-commit.com/>`_
+to automatically run all the above tools every time you make a git commit. This
+can be done by running::
+
+   pre-commit install
+
+from the root of the xarray repository. You can skip the pre-commit checks
+with ``git commit --no-verify``.
+
+
+Backwards Compatibility
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Please try to maintain backwards compatibility. *xarray* has a growing number of users with
+lots of existing code, so don't break it if at all possible.  If you think breakage is
+required, clearly state why as part of the pull request.
+
+Be especially careful when changing function and method signatures, because any change
+may require a deprecation warning. For example, if your pull request means that the
+argument ``old_arg`` to ``func`` is no longer valid, instead of simply raising an error if
+a user passes ``old_arg``, we would instead catch it:
+
+.. code-block:: python
+
+    def func(new_arg, old_arg=None):
+        if old_arg is not None:
+            from xarray.core.utils import emit_user_level_warning
+
+            emit_user_level_warning(
+                "`old_arg` has been deprecated, and in the future will raise an error. "
+                "Please use `new_arg` from now on.",
+                DeprecationWarning,
+            )
+
+            # Still do what the user intended here
+
+This temporary check would then be removed in a subsequent version of xarray.
+This process of first warning users before actually breaking their code is known as a
+"deprecation cycle", and makes changes significantly easier to handle both for users
+of xarray, and for developers of other libraries that depend on xarray.
+
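+A minimal sketch of how such a deprecation could be tested with ``pytest``, reusing
+the hypothetical ``func`` and the ``emit_user_level_warning`` helper from the snippet
+above:
+
+.. code-block:: python
+
+    import pytest
+
+    from xarray.core.utils import emit_user_level_warning
+
+
+    def func(new_arg, old_arg=None):
+        # Hypothetical function mirroring the deprecation example above.
+        if old_arg is not None:
+            emit_user_level_warning(
+                "`old_arg` has been deprecated, and in the future will raise an error. "
+                "Please use `new_arg` from now on.",
+                DeprecationWarning,
+            )
+        return new_arg
+
+
+    def test_old_arg_warns():
+        # The deprecated keyword should emit a DeprecationWarning mentioning it.
+        with pytest.warns(DeprecationWarning, match="old_arg"):
+            func(new_arg=1, old_arg=2)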
+
+.. _contributing.ci:
+
+Testing With Continuous Integration
+-----------------------------------
+
+The *xarray* test suite runs automatically via the
+`GitHub Actions <https://docs.github.com/en/free-pro-team@latest/actions>`__
+continuous integration service once your pull request is submitted.
+
+A pull request will be considered for merging when it has an all 'green' build. If any
+tests are failing, you will get a red 'X', and you can click through to see the
+individual failed tests. This is an example of a green build.
+
+.. image:: ../_static/ci.png
+
+.. note::
+
+   Each time you push to your PR branch, a new run of the tests will be
+   triggered on the CI. If they haven't already finished, tests for any older
+   commits on the same branch will be automatically cancelled.
+
+.. _contributing.tdd:
+
+
+Test-driven development/code writing
+------------------------------------
+
+*xarray* is serious about testing and strongly encourages contributors to embrace
+`test-driven development (TDD) <https://en.wikipedia.org/wiki/Test-driven_development>`_.
+This development process "relies on the repetition of a very short development cycle:
+first the developer writes an (initially failing) automated test case that defines a desired
+improvement or new function, then produces the minimum amount of code to pass that test."
+So, before actually writing any code, you should write your tests.  Often the test can be
+taken from the original GitHub issue.  However, it is always worth considering additional
+use cases and writing corresponding tests.
+
+Adding tests is one of the most common requests after code is pushed to *xarray*.  Therefore,
+it is worth getting in the habit of writing tests ahead of time so that this is never an issue.
+
+Like many packages, *xarray* uses `pytest
+<https://doc.pytest.org/en/latest/>`_ and the convenient
+extensions in `numpy.testing
+<https://numpy.org/doc/stable/reference/routines.testing.html>`_.
+
+Writing tests
+~~~~~~~~~~~~~
+
+All tests should go into the ``tests`` subdirectory of the specific package.
+This folder contains many current examples of tests, and we suggest looking to these for
+inspiration.
+
+The ``xarray.testing`` module has many special ``assert`` functions that
+make it easier to make statements about whether DataArray or Dataset objects are
+equivalent. The easiest way to verify that your code is correct is to
+explicitly construct the result you expect, then compare the actual result to
+the expected correct result::
+
+    def test_constructor_from_0d():
+        expected = Dataset({None: ([], 0)})[None]
+        actual = DataArray(0)
+        assert_identical(expected, actual)
+
+Transitioning to ``pytest``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+*xarray*'s existing test structure is *mostly* class-based, meaning that you will
+typically find tests wrapped in a class.
+
+.. code-block:: python
+
+    class TestReallyCoolFeature: ...
+
+Going forward, we are moving to a more *functional* style using the
+`pytest <https://doc.pytest.org/en/latest/>`__ framework, which offers a richer set of
+testing features that facilitate testing and development. Thus, instead of
+writing test classes, we will write test functions like this:
+
+.. code-block:: python
+
+    def test_really_cool_feature(): ...
+
+Using ``pytest``
+~~~~~~~~~~~~~~~~
+
+Here is an example of a self-contained set of tests that illustrate multiple
+features that we like to use.
+
+- functional style: tests are named like ``test_*`` and *only* take arguments that are either
+  fixtures or parameters
+- ``pytest.mark`` can be used to set metadata on test functions, e.g. ``skip`` or ``xfail``.
+- using ``parametrize``: allows testing of multiple cases
+- to set a mark on a parameter, ``pytest.param(..., marks=...)`` syntax should be used
+- ``fixture``, code for object construction, on a per-test basis
+- using bare ``assert`` for scalars and truth-testing
+- ``assert_equal`` and ``assert_identical`` from the ``xarray.testing`` module for xarray object comparisons.
+- the typical pattern of constructing an ``expected`` and comparing versus the ``result``
+
+We would name this file ``test_cool_feature.py`` and put it in an appropriate place in the
+``xarray/tests/`` structure.
+
+.. code-block:: python
+
+    import pytest
+    import numpy as np
+    import xarray as xr
+    from xarray.testing import assert_equal
+
+
+    @pytest.mark.parametrize("dtype", ["int8", "int16", "int32", "int64"])
+    def test_dtypes(dtype):
+        assert str(np.dtype(dtype)) == dtype
+
+
+    @pytest.mark.parametrize(
+        "dtype",
+        [
+            "float32",
+            pytest.param("int16", marks=pytest.mark.skip),
+            pytest.param(
+                "int32", marks=pytest.mark.xfail(reason="to show how it works")
+            ),
+        ],
+    )
+    def test_mark(dtype):
+        assert str(np.dtype(dtype)) == "float32"
+
+
+    @pytest.fixture
+    def dataarray():
+        return xr.DataArray([1, 2, 3])
+
+
+    @pytest.fixture(params=["int8", "int16", "int32", "int64"])
+    def dtype(request):
+        return request.param
+
+
+    def test_series(dataarray, dtype):
+        result = dataarray.astype(dtype)
+        assert result.dtype == dtype
+
+        expected = xr.DataArray(np.array([1, 2, 3], dtype=dtype))
+        assert_equal(result, expected)
+
+
+
+A test run of this yields
+
+.. code-block:: shell
+
+    (xarray) $ pytest test_cool_feature.py -v
+    ================================= test session starts ==================================
+    platform darwin -- Python 3.10.6, pytest-7.2.0, pluggy-1.0.0 --
+    cachedir: .pytest_cache
+    plugins: hypothesis-6.56.3, cov-4.0.0
+    collected 11 items
+
+    xarray/tests/test_cool_feature.py::test_dtypes[int8] PASSED                       [  9%]
+    xarray/tests/test_cool_feature.py::test_dtypes[int16] PASSED                      [ 18%]
+    xarray/tests/test_cool_feature.py::test_dtypes[int32] PASSED                      [ 27%]
+    xarray/tests/test_cool_feature.py::test_dtypes[int64] PASSED                      [ 36%]
+    xarray/tests/test_cool_feature.py::test_mark[float32] PASSED                      [ 45%]
+    xarray/tests/test_cool_feature.py::test_mark[int16] SKIPPED (unconditional skip)  [ 54%]
+    xarray/tests/test_cool_feature.py::test_mark[int32] XFAIL (to show how it works)  [ 63%]
+    xarray/tests/test_cool_feature.py::test_series[int8] PASSED                       [ 72%]
+    xarray/tests/test_cool_feature.py::test_series[int16] PASSED                      [ 81%]
+    xarray/tests/test_cool_feature.py::test_series[int32] PASSED                      [ 90%]
+    xarray/tests/test_cool_feature.py::test_series[int64] PASSED                      [100%]
+
+
+    ==================== 9 passed, 1 skipped, 1 xfailed in 1.83 seconds ====================
+
+Tests that we have ``parametrized`` are now accessible via the test name. For
+example, we could run these with ``-k int8`` to sub-select *only* those tests
+which match ``int8``.
+
+
+.. code-block:: shell
+
+   (xarray) bash-3.2$ pytest test_cool_feature.py -v -k int8
+   ================================== test session starts ==================================
+   platform darwin -- Python 3.10.6, pytest-7.2.0, pluggy-1.0.0 --
+   cachedir: .pytest_cache
+   plugins: hypothesis-6.56.3, cov-4.0.0
+   collected 11 items
+
+   test_cool_feature.py::test_dtypes[int8] PASSED
+   test_cool_feature.py::test_series[int8] PASSED
+
+
+Running the test suite
+----------------------
+
+The tests can then be run directly inside your Git clone (without having to
+install *xarray*) by typing::
+
+    pytest xarray
+
+The test suite is exhaustive and takes a few minutes.  Often it is
+worth running only a subset of tests first around your changes before running the
+entire suite.
+
+The easiest way to do this is with::
+
+    pytest xarray/path/to/test.py -k regex_matching_test_name
+
+Or with one of the following constructs::
+
+    pytest xarray/tests/[test-module].py
+    pytest xarray/tests/[test-module].py::[TestClass]
+    pytest xarray/tests/[test-module].py::[TestClass]::[test_method]
+
+Using `pytest-xdist <https://pypi.python.org/pypi/pytest-xdist>`_, one can
+speed up local testing on multicore machines by running pytest with the optional ``-n`` argument::
+
+    pytest xarray -n 4
+
+This can significantly reduce the time it takes to locally run tests before
+submitting a pull request.
+
+For more, see the `pytest <https://doc.pytest.org/en/latest/>`_ documentation.
+
+Running the performance test suite
+----------------------------------
+
+Performance matters and it is worth considering whether your code has introduced
+performance regressions.  *xarray* is starting to write a suite of benchmarking tests
+using `asv <https://github.com/airspeed-velocity/asv>`__
+to enable easy monitoring of the performance of critical *xarray* operations.
+These benchmarks are all found in the ``xarray/asv_bench`` directory.
+
+To use all features of asv, you will need either ``conda`` or
+``virtualenv``. For more details please check the `asv installation
+webpage <https://asv.readthedocs.io/en/stable/installing.html>`_.
+
+To install asv::
+
+    python -m pip install asv
+
+If you need to run a benchmark, change your directory to ``asv_bench/`` and run::
+
+    asv continuous -f 1.1 upstream/main HEAD
+
+You can replace ``HEAD`` with the name of the branch you are working on.
+The command reports benchmarks that changed by more than 10%.
+The command uses ``conda`` by default for creating the benchmark
+environments. If you want to use virtualenv instead, write::
+
+    asv continuous -f 1.1 -E virtualenv upstream/main HEAD
+
+The ``-E virtualenv`` option should be added to all ``asv`` commands
+that run benchmarks. The default value is defined in ``asv.conf.json``.
+
+Running the full benchmark suite can take up to one hour and use up a few GBs of RAM.
+Usually it is sufficient to paste only a subset of the results into the pull
+request to show that the committed changes do not cause unexpected performance
+regressions.  You can run specific benchmarks using the ``-b`` flag, which
+takes a regular expression.  For example, this will only run benchmarks from the
+``xarray/asv_bench/benchmarks/groupby.py`` file::
+
+    asv continuous -f 1.1 upstream/main HEAD -b ^groupby
+
+If you want to only run a specific group of tests from a file, you can do it
+using ``.`` as a separator. For example::
+
+    asv continuous -f 1.1 upstream/main HEAD -b groupby.GroupByMethods
+
+will only run the ``GroupByMethods`` benchmark defined in ``groupby.py``.
+
+You can also run the benchmark suite using the version of *xarray*
+already installed in your current Python environment. This can be
+useful if you do not have ``virtualenv`` or ``conda``, or are using the
+``pip install -e .`` approach discussed above; for the in-place build
+you need to set ``PYTHONPATH``, e.g.
+``PYTHONPATH="$PWD/.." asv [remaining arguments]``.
+You can run benchmarks using an existing Python
+environment by::
+
+    asv run -e -E existing
+
+or, to use a specific Python interpreter::
+
+    asv run -e -E existing:python3.10
+
+This will display stderr from the benchmarks, and use your local
+``python`` that comes from your ``$PATH``.
+
+Learn `how to write a benchmark and how to use asv from the documentation <https://asv.readthedocs.io/en/latest/writing_benchmarks.html>`_.
+
+
+..
+   TODO: uncomment once we have a working setup
+         see https://github.com/pydata/xarray/pull/5066
+
+   The *xarray* benchmarking suite is run remotely and the results are
+   available `here <https://pandas.pydata.org/speed/xarray/>`_.
+
+Documenting your code
+---------------------
+
+Changes should be reflected in the release notes located in ``doc/whats-new.rst``.
+This file contains an ongoing change log for each release.  Add an entry to this file to
+document your fix, enhancement or (unavoidable) breaking change.  Make sure to include the
+GitHub issue number when adding your entry (using ``:issue:`1234```, where ``1234`` is the
+issue/pull request number).
+
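+A typical entry is a short bullet under the appropriate category of the release notes;
+roughly like this (a sketch with a placeholder description, issue number, and author):
+
+.. code:: rst
+
+    - Fix a bug in ``DataArray.mean`` with empty arrays (:issue:`1234`).
+      By `Jane Doe <https://github.com/jane-doe>`_.
+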
+If your code is an enhancement, it is most likely necessary to add usage
+examples to the existing documentation.  This can be done by following the :ref:`guidelines for contributing to the documentation <contributing.documentation>`.
+
+.. _contributing.changes:
+
+Contributing your changes to *xarray*
+=====================================
+
+.. _contributing.committing:
+
+Committing your code
+--------------------
+
+Keep style fixes to a separate commit to make your pull request more readable.
+
+Once you've made changes, you can see them by typing::
+
+    git status
+
+If you have created a new file, it is not being tracked by git. Add it by typing::
+
+    git add path/to/file-to-be-added.py
+
+Doing ``git status`` again should give something like::
+
+    # On branch shiny-new-feature
+    #
+    #       new file:   path/to/file-to-be-added.py
+    #
+
+The following defines how a commit message should ideally be structured:
+
+* A subject line with ``< 72`` chars.
+* One blank line.
+* Optionally, a commit message body.
+
+Please reference the relevant GitHub issues in your commit message using ``GH1234`` or
+``#1234``.  Either style is fine, but the former is generally preferred.
+
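+For example, a commit message following these conventions might look like this (an
+illustrative sketch with a placeholder issue number)::
+
+    Fix handling of empty arrays in mean (GH1234)
+
+    Also adds a regression test covering the empty-array case.
+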
+Now you can commit your changes in your local repository::
+
+    git commit -m "your commit message goes here"
+
+
+.. _contributing.pushing:
+
+Pushing your changes
+--------------------
+
+When you want your changes to appear publicly on your GitHub page, push your
+forked feature branch's commits::
+
+    git push origin shiny-new-feature
+
+Here ``origin`` is the default name given to your remote repository on GitHub.
+You can see the remote repositories::
+
+    git remote -v
+
+If you added the upstream repository as described above you will see something
+like::
+
+    origin  git@github.com:yourname/xarray.git (fetch)
+    origin  git@github.com:yourname/xarray.git (push)
+    upstream        git://github.com/pydata/xarray.git (fetch)
+    upstream        git://github.com/pydata/xarray.git (push)
+
+Now your code is on GitHub, but it is not yet a part of the *xarray* project.  For that to
+happen, a pull request needs to be submitted on GitHub.
+
+.. _contributing.review:
+
+Review your code
+----------------
+
+When you're ready to ask for a code review, file a pull request. Before you do, once
+again make sure that you have followed all the guidelines outlined in this document
+regarding code style, tests, performance tests, and documentation. You should also
+double check your branch changes against the branch it was based on:
+
+#. Navigate to your repository on GitHub -- https://github.com/your-user-name/xarray
+#. Click on ``Branches``
+#. Click on the ``Compare`` button for your feature branch
+#. Select the ``base`` and ``compare`` branches, if necessary. This will be ``main`` and
+   ``shiny-new-feature``, respectively.
+
+.. _contributing.pr:
+
+Finally, make the pull request
+------------------------------
+
+If everything looks good, you are ready to make a pull request.  A pull request is how
+code from a local repository becomes available to the GitHub community and can be looked
+at and eventually merged into the ``main`` version.  This pull request and its associated
+changes will eventually be committed to the ``main`` branch and available in the next
+release.  To submit a pull request:
+
+#. Navigate to your repository on GitHub
+#. Click on the ``Pull Request`` button
+#. You can then click on ``Commits`` and ``Files Changed`` to make sure everything looks
+   okay one last time
+#. Write a description of your changes in the ``Preview Discussion`` tab
+#. Click ``Send Pull Request``.
+
+This request then goes to the repository maintainers, and they will review
+the code.
+
+If you have made updates to the documentation, you can now see a preview of the updated docs by clicking on "Details" under
+the ``docs/readthedocs.org`` check near the bottom of the list of checks that run automatically when submitting a PR,
+then clicking on the "View Docs" button on the right (not the big green button, the small black one further down).
+
+.. image:: ../_static/view-docs.png
+
+
+If you need to make more changes, you can make them in
+your branch, add them to a new commit, push them to GitHub, and the pull request
+will automatically be updated.  Pushing them to GitHub again is done by::
+
+    git push origin shiny-new-feature
+
+This will automatically update your pull request with the latest code and restart the
+:ref:`Continuous Integration <contributing.ci>` tests.
+
+
+.. _contributing.delete:
+
+Delete your merged branch (optional)
+------------------------------------
+
+Once your feature branch is accepted into upstream, you'll probably want to get rid of
+the branch. First, update your ``main`` branch to check that the merge was successful::
+
+    git fetch upstream
+    git checkout main
+    git merge upstream/main
+
+Then you can do::
+
+    git branch -D shiny-new-feature
+
+You need to use an upper-case ``-D`` because the branch was squashed into a
+single commit before merging. Be careful with this because ``git`` won't warn
+you if you accidentally delete an unmerged branch.
+
+Unless you deleted your branch using GitHub's interface, it will still exist on
+GitHub. To delete it there, do::
+
+    git push origin --delete shiny-new-feature
+
+
+.. _contributing.checklist:
+
+PR checklist
+------------
+
+- **Properly comment and document your code.** See `"Documenting your code" <https://docs.xarray.dev/en/stable/contributing.html#documenting-your-code>`_.
+- **Test that the documentation builds correctly** by typing ``make html`` in the ``doc`` directory. This is not strictly necessary, but this may be easier than waiting for CI to catch a mistake. See `"Contributing to the documentation" <https://docs.xarray.dev/en/stable/contributing.html#contributing-to-the-documentation>`_.
+- **Test your code**.
+
+  - Write new tests if needed. See `"Test-driven development/code writing" <https://docs.xarray.dev/en/stable/contributing.html#test-driven-development-code-writing>`_.
+  - Test the code using `Pytest <https://doc.pytest.org/en/latest/>`_. Running all tests (type ``pytest`` in the root directory) takes a while, so feel free to only run the tests you think are needed based on your PR (example: ``pytest xarray/tests/test_dataarray.py``). CI will catch any failing tests.
+  - By default, the upstream dev CI is disabled on pull request and push events. You can override this behavior per commit by adding a ``[test-upstream]`` tag to the first line of the commit message. For documentation-only commits, you can skip the CI per commit by adding a ``[skip-ci]`` tag to the first line of the commit message.
+
+- **Properly format your code** and verify that it passes the formatting guidelines set by `ruff <https://github.com/astral-sh/ruff>`_. See `"Code formatting" <https://docs.xarray.dev/en/stable/contributing.html#code-formatting>`_. You can use `pre-commit <https://pre-commit.com/>`_ to run these automatically on each commit.
+
+  - Run ``pre-commit run --all-files`` in the root directory. This may modify some files. Confirm and commit any formatting changes.
+
+- **Push your code** and `create a PR on GitHub <https://help.github.com/en/articles/creating-a-pull-request>`_.
+- **Use a helpful title for your pull request** by summarizing the main contributions rather than using the latest commit message. If the PR addresses an `issue <https://github.com/pydata/xarray/issues>`_, please `reference it <https://help.github.com/en/articles/autolinked-references-and-urls>`_.
diff -pruN 2025.03.1-8/doc/contribute/developers-meeting.rst 2025.10.1-1/doc/contribute/developers-meeting.rst
--- 2025.03.1-8/doc/contribute/developers-meeting.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/contribute/developers-meeting.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,22 @@
+.. _developers-meeting:
+
+Developers meeting
+------------------
+
+Xarray developers meet every other Wednesday.
+
+The meeting occurs on `Zoom <https://us02web.zoom.us/j/87503265754?pwd=cEFJMzFqdTFaS3BMdkx4UkNZRk1QZz09>`__.
+
+Find the `notes for the meeting here <https://hackmd.io/LFOk5e8BSnqjX3QiKWy5Mw/edit>`__.
+
+There is a :issue:`GitHub issue for changes to the meeting <4001>`.
+
+You can subscribe to this calendar to be notified of changes:
+
+* `Google Calendar <https://calendar.google.com/calendar/embed?src=59589f9634ab4ef304e8209be66cda9812dababca71eb8a01a6fa2d167f90d94%40group.calendar.google.com&ctz=America%2FLos_Angeles>`__
+* `iCal <https://calendar.google.com/calendar/ical/59589f9634ab4ef304e8209be66cda9812dababca71eb8a01a6fa2d167f90d94%40group.calendar.google.com/public/basic.ics>`__
+
+.. raw:: html
+
+   <iframe src="https://calendar.google.com/calendar/embed?src=59589f9634ab4ef304e8209be66cda9812dababca71eb8a01a6fa2d167f90d94%40group.calendar.google.com&ctz=America%2FLos_Angeles" style="border: 0" width="800" height="600" frameborder="0" scrolling="no"></iframe>
+   <script>document.getElementById("calendariframe").src = document.getElementById("calendariframe").src.replace("ctz=local", "ctz=" + Intl.DateTimeFormat().resolvedOptions().timeZone)</script>
diff -pruN 2025.03.1-8/doc/contribute/index.rst 2025.10.1-1/doc/contribute/index.rst
--- 2025.03.1-8/doc/contribute/index.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/contribute/index.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,20 @@
+########################
+Xarray Developer's Guide
+########################
+
+We welcome your skills and enthusiasm at the Xarray project! There are numerous opportunities to
+contribute beyond just writing code.
+All contributions, including bug reports, bug fixes, documentation improvements, enhancement suggestions,
+and other ideas are welcome. Please review our Contributor's guide for more guidance.
+In this section you will also find documentation on the internal organization of Xarray's source code, the roadmap for current development priorities, and guidance on how to engage with core maintainers of the Xarray codebase.
+
+.. toctree::
+   :maxdepth: 2
+   :hidden:
+
+   contributing
+   ../internals/index
+   ../roadmap
+   ../whats-new
+   developers-meeting
+   Team <https://xarray.dev/team>
diff -pruN 2025.03.1-8/doc/contributing.rst 2025.10.1-1/doc/contributing.rst
--- 2025.03.1-8/doc/contributing.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/contributing.rst	1970-01-01 00:00:00.000000000 +0000
@@ -1,1073 +0,0 @@
-.. _contributing:
-
-**********************
-Contributing to xarray
-**********************
-
-.. note::
-
-  Large parts of this document came from the `Pandas Contributing
-  Guide <https://pandas.pydata.org/pandas-docs/stable/development/contributing.html>`_.
-
-Overview
-========
-
-We welcome your skills and enthusiasm at the xarray project!. There are numerous opportunities to
-contribute beyond just writing code.
-All contributions, including bug reports, bug fixes, documentation improvements, enhancement suggestions,
-and other ideas are welcome.
-
-If you have any questions on the process or how to fix something feel free to ask us!
-The recommended place to ask a question is  on `GitHub Discussions <https://github.com/pydata/xarray/discussions>`_
-, but we also have a `Discord <https://discord.com/invite/wEKPCt4PDu>`_ and a
-`mailing list <https://groups.google.com/g/xarray>`_. There is also a
-`"python-xarray" tag on Stack Overflow <https://stackoverflow.com/questions/tagged/python-xarray>`_ which we monitor for questions.
-
-We also have a biweekly community call, details of which are announced on the
-`Developers meeting <https://docs.xarray.dev/en/stable/developers-meeting.html>`_.
-You are very welcome to join! Though we would love to hear from you, there is no expectation to
-contribute during the meeting either - you are always welcome to just sit in and listen.
-
-This project is a community effort, and everyone is welcome to contribute. Everyone within the community
-is expected to abide by our `code of conduct <https://github.com/pydata/xarray/blob/main/CODE_OF_CONDUCT.md>`_.
-
-Where to start?
-===============
-
-If you are brand new to *xarray* or open-source development, we recommend going
-through the `GitHub "issues" tab <https://github.com/pydata/xarray/issues>`_
-to find issues that interest you.
-Some issues are particularly suited for new contributors by the label `Documentation <https://github.com/pydata/xarray/labels/topic-documentation>`_
-and `good first issue
-<https://github.com/pydata/xarray/labels/contrib-good-first-issue>`_ where you could start out.
-These are well documented issues, that do not require a deep understanding of the internals of xarray.
-
-Once you've found an interesting issue, you can return here to get your development environment setup.
-The xarray project does not assign issues. Issues are "assigned" by opening a Pull Request(PR).
-
-.. _contributing.bug_reports:
-
-Bug reports and enhancement requests
-====================================
-
-Bug reports are an important part of making *xarray* more stable. Having a complete bug
-report will allow others to reproduce the bug and provide insight into fixing.
-
-Trying out the bug-producing code on the *main* branch is often a worthwhile exercise
-to confirm that the bug still exists. It is also worth searching existing bug reports and
-pull requests to see if the issue has already been reported and/or fixed.
-
-Submitting a bug report
------------------------
-
-If you find a bug in the code or documentation, do not hesitate to submit a ticket to the
-`Issue Tracker <https://github.com/pydata/xarray/issues>`_.
-You are also welcome to post feature requests or pull requests.
-
-If you are reporting a bug, please use the provided template which includes the following:
-
-#. Include a short, self-contained Python snippet reproducing the problem.
-   You can format the code nicely by using `GitHub Flavored Markdown
-   <https://github.github.com/github-flavored-markdown/>`_::
-
-      ```python
-      import xarray as xr
-      ds = xr.Dataset(...)
-
-      ...
-      ```
-
-#. Include the full version string of *xarray* and its dependencies. You can use the
-   built in function::
-
-      ```python
-      import xarray as xr
-      xr.show_versions()
-
-      ...
-      ```
-
-#. Explain why the current behavior is wrong/not desired and what you expect instead.
-
-The issue will then show up to the *xarray* community and be open to comments/ideas from others.
-
-See this `stackoverflow article for tips on writing a good bug report <https://stackoverflow.com/help/mcve>`_ .
-
-
-.. _contributing.github:
-
-Now that you have an issue you want to fix, enhancement to add, or documentation
-to improve, you need to learn how to work with GitHub and the *xarray* code base.
-
-.. _contributing.version_control:
-
-Version control, Git, and GitHub
-================================
-
-The code is hosted on `GitHub <https://www.github.com/pydata/xarray>`_. To
-contribute you will need to sign up for a `free GitHub account
-<https://github.com/signup/free>`_. We use `Git <https://git-scm.com/>`_ for
-version control to allow many people to work together on the project.
-
-Some great resources for learning Git:
-
-* the `GitHub help pages <https://help.github.com/>`_.
-* the `NumPy's documentation <https://numpy.org/doc/stable/dev/index.html>`_.
-* Matthew Brett's `Pydagogue <https://matthew-brett.github.io/pydagogue/>`_.
-
-Getting started with Git
-------------------------
-
-`GitHub has instructions for setting up Git <https://help.github.com/set-up-git-redirect>`__ including installing git,
-setting up your SSH key, and configuring git.  All these steps need to be completed before
-you can work seamlessly between your local repository and GitHub.
-
-.. note::
-
-    The following instructions assume you want to learn how to interact with github via the git command-line utility,
-    but contributors who are new to git may find it easier to use other tools instead such as
-    `Github Desktop <https://desktop.github.com/>`_.
-
-Development workflow
-====================
-
-To keep your work well organized, with readable history, and in turn make it easier for project
-maintainers to see what you've done, and why you did it, we recommend you to follow workflow:
-
-1. `Create an account <https://github.com/>`_ on GitHub if you do not already have one.
-
-2. You will need your own fork to work on the code. Go to the `xarray project
-   page <https://github.com/pydata/xarray>`_ and hit the ``Fork`` button near the top of the page.
-   This creates a copy of the code under your account on the GitHub server.
-
-3. Clone your fork to your machine::
-
-    git clone https://github.com/your-user-name/xarray.git
-    cd xarray
-    git remote add upstream https://github.com/pydata/xarray.git
-
-   This creates the directory ``xarray`` and connects your repository to
-   the upstream (main project) *xarray* repository.
-
-Creating a development environment
-----------------------------------
-
-To test out code changes locally, you'll need to build *xarray* from source, which requires you to
-`create a local development environment <https://docs.xarray.dev/en/stable/contributing.html#contributing-dev-env>`_.
-
-Update the ``main`` branch
---------------------------
-
-First make sure you have followed `Setting up xarray for development
-<https://docs.xarray.dev/en/stable/contributing.html#creating-a-development-environment>`_.
-
-Before starting a new set of changes, fetch all changes from ``upstream/main``, and start a new
-feature branch from that. From time to time you should fetch the upstream changes from GitHub: ::
-
-    git fetch upstream
-    git merge upstream/main
-
-This will combine your commits with the latest *xarray* git ``main``.  If this
-leads to merge conflicts, you must resolve these before submitting your pull
-request.  If you have uncommitted changes, you will need to ``git stash`` them
-prior to updating.  This will effectively store your changes, which can be
-reapplied after updating.
-
-Create a new feature branch
----------------------------
-
-Create a branch to save your changes, even before you start making changes. You want your
-``main`` branch to contain only production-ready code::
-
-    git checkout -b shiny-new-feature
-
-This changes your working directory to the ``shiny-new-feature`` branch.  Keep any changes in this
-branch specific to one bug or feature so it is clear what the branch brings to *xarray*. You can have
-many "shiny-new-features" and switch in between them using the ``git checkout`` command.
-
-Generally, you will want to keep your feature branches on your public GitHub fork of xarray. To do this,
-you ``git push`` this new branch up to your GitHub repo. If you followed the instructions in
-these pages (and by default), git will already have a link to your fork of the GitHub repo, called ``origin``.
-You push up to your own fork with: ::
-
-    git push origin shiny-new-feature
-
-In git >= 1.7 you can ensure that the link is correctly set by using the ``--set-upstream`` option: ::
-
-    git push --set-upstream origin shiny-new-feature
-
-From now on git will know that ``shiny-new-feature`` is related to the ``shiny-new-feature`` branch in the GitHub repo.
-
-The editing workflow
---------------------
-
-1. Make some changes
-
-2. See which files have changed with ``git status``. You'll see a listing like this one: ::
-
-    # On branch shiny-new-feature
-    # Changed but not updated:
-    #   (use "git add <file>..." to update what will be committed)
-    #   (use "git checkout -- <file>..." to discard changes in working directory)
-    #
-    #  modified:   README
-
-3. Check what the actual changes are with ``git diff``.
-
-4. `Build the documentation <https://docs.xarray.dev/en/stable/contributing.html#building-the-documentation>`_
-   if you have made documentation changes.
-
-`Run the test suite <https://docs.xarray.dev/en/stable/contributing.html#running-the-test-suite>`_ for code changes.
-
-Commit and push your changes
-----------------------------
-
-1. To commit all modified files into the local copy of your repo, do ``git commit -am 'A commit message'``.
-
-2. To push the changes up to your forked repo on GitHub, do a ``git push``.
-
-Open a pull request
--------------------
-
-When you're ready or need feedback on your code, open a Pull Request (PR) so that the xarray developers can
-give feedback and eventually include your suggested code into the ``main`` branch.
-`Pull requests (PRs) on GitHub <https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-pull-requests>`_
-are the mechanism for contributing to xarray's code and documentation.
-
-Enter a title for the set of changes with some explanation of what you've done.
-Follow the PR template, which looks like this::
-
-    [ ] Closes #xxxx
-    [ ] Tests added
-    [ ] User visible changes (including notable bug fixes) are documented in whats-new.rst
-    [ ] New functions/methods are listed in api.rst
-
-Mention anything you'd like particular attention for - such as a complicated change or some code you are not happy with.
-If you don't think your request is ready to be merged, just say so in your pull request message and use
-the "Draft PR" feature of GitHub. This is a good way of getting some preliminary code review.
-
-.. _contributing.dev_env:
-
-Creating a development environment
-==================================
-
-To test out code changes locally, you'll need to build *xarray* from source, which
-requires a Python environment. If you're making documentation changes, you can
-skip to :ref:`contributing.documentation` but you won't be able to build the
-documentation locally before pushing your changes.
-
-.. note::
-
-    For small changes, such as fixing a typo, you don't necessarily need to build and test xarray locally.
-    If you make your changes then :ref:`commit and push them to a new branch <contributing.changes>`,
-    xarray's automated :ref:`continuous integration tests <contributing.ci>` will run and check your code in various ways.
-    You can then try to fix these problems by committing and pushing more commits to the same branch.
-
-    You can also avoid building the documentation locally by instead :ref:`viewing the updated documentation via the CI <contributing.pr>`.
-
-    To speed up this feedback loop or for more complex development tasks you should build and test xarray locally.
-
-
-.. _contributing.dev_python:
-
-Creating a Python Environment
------------------------------
-
-Before starting any development, you'll need to create an isolated xarray
-development environment:
-
-- Install either `Anaconda <https://www.anaconda.com/download/>`_ or `miniconda
-  <https://conda.io/miniconda.html>`_
-- Make sure your conda is up to date (``conda update conda``)
-- Make sure that you have :ref:`cloned the repository <contributing.forking>`
-- ``cd`` to the *xarray* source directory
-
-We'll now kick off a two-step process:
-
-1. Install the build dependencies
-2. Build and install xarray
-
-.. code-block:: sh
-
-   # Create and activate the build environment
-   conda create -c conda-forge -n xarray-tests python=3.10
-
-   # This is for Linux and MacOS
-   conda env update -f ci/requirements/environment.yml
-
-   # On windows, use environment-windows.yml instead
-   conda env update -f ci/requirements/environment-windows.yml
-
-   conda activate xarray-tests
-
-   # or with older versions of Anaconda:
-   source activate xarray-tests
-
-   # Build and install xarray
-   pip install -e .
-
-At this point you should be able to import *xarray* from your locally
-built version:
-
-.. code-block:: sh
-
-   $ python  # start an interpreter
-   >>> import xarray
-   >>> xarray.__version__
-   '0.10.0+dev46.g015daca'
-
-This will create the new environment, and not touch any of your existing environments,
-nor any existing Python installation.
-
-To view your environments::
-
-      conda info -e
-
-To return to your root environment::
-
-      conda deactivate
-
-See the full `conda docs here <https://conda.pydata.org/docs>`__.
-
-Install pre-commit hooks
-------------------------
-
-We highly recommend that you set up `pre-commit <https://pre-commit.com/>`_ hooks to automatically
-run the project's formatting and linting tools every time you make a git commit. To install the hooks::
-
-    python -m pip install pre-commit
-    pre-commit install
-
-You can also run the hooks manually at any time with::
-
-    pre-commit run
-
-from the root of the xarray repository. You can skip the pre-commit checks with
-``git commit --no-verify``.
-
-.. _contributing.documentation:
-
-Contributing to the documentation
-=================================
-
-If you're not the developer type, contributing to the documentation is still of
-huge value. You don't even have to be an expert on *xarray* to do so! In fact,
-there are sections of the docs that are worse off after being written by
-experts. If something in the docs doesn't make sense to you, updating the
-relevant section after you figure it out is a great way to ensure it will help
-the next person.
-
-.. contents:: Documentation:
-   :local:
-
-
-About the *xarray* documentation
---------------------------------
-
-The documentation is written in **reStructuredText**, which is almost like writing
-in plain English, and built using `Sphinx <https://www.sphinx-doc.org/>`__. The
-Sphinx Documentation has an excellent `introduction to reST
-<https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html>`__. Review the Sphinx docs if you need to make
-more complex changes to the documentation.
-
-Some other important things to know about the docs:
-
-- The *xarray* documentation consists of two parts: the docstrings in the code
-  itself and the docs in this folder ``xarray/doc/``.
-
-  The docstrings are meant to provide a clear explanation of the usage of the
-  individual functions, while the documentation in this folder consists of
-  tutorial-like overviews per topic together with some other information
-  (what's new, installation, etc).
-
-- The docstrings follow the **NumPy Docstring Standard**, which is used widely
-  in the Scientific Python community. This standard specifies the format of
-  the different sections of the docstring. Refer to the `documentation for the NumPy docstring format
-  <https://numpydoc.readthedocs.io/en/latest/format.html#docstring-standard>`_
-  for a detailed explanation, or look at some of the existing functions to
-  see how they are documented (a minimal sketch is also shown after this list).
-
-- The tutorials make heavy use of the `ipython directive
-  <https://matplotlib.org/sampledoc/ipython_directive.html>`_ sphinx extension.
-  This directive lets you put code in the documentation which will be run
-  during the doc build. For example:
-
-  .. code:: rst
-
-      .. ipython:: python
-
-          x = 2
-          x**3
-
-  will be rendered as::
-
-      In [1]: x = 2
-
-      In [2]: x**3
-      Out[2]: 8
-
-  Almost all code examples in the docs are run (and the output saved) during the
-  doc build. This approach means that code examples will always be up to date,
-  but it does make building the docs a bit more complex.
-
-- Our API documentation in ``doc/api.rst`` houses the auto-generated
-  documentation from the docstrings. For classes, there are a few subtleties
-  around controlling which methods and attributes have pages auto-generated.
-
-  Every method should be included in a ``toctree`` in ``api.rst``, else Sphinx
-  will emit a warning.
-
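-For orientation, here is a minimal, hypothetical sketch of a NumPy-style docstring;
-the function and its parameters are invented purely to illustrate the section layout:
-
-.. code-block:: python
-
-    def clip_to_range(arr, lower, upper):
-        """Clip the values of ``arr`` to the interval ``[lower, upper]``.
-
-        Parameters
-        ----------
-        arr : array_like
-            Input values.
-        lower, upper : float
-            Bounds of the interval.
-
-        Returns
-        -------
-        clipped : ndarray
-            Copy of ``arr`` with out-of-range values replaced by the bounds.
-        """
-        ...
-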
-
-How to build the *xarray* documentation
----------------------------------------
-
-Requirements
-~~~~~~~~~~~~
-Make sure to follow the instructions on :ref:`creating a development environment above <contributing.dev_env>`, but
-to build the docs you need to use the environment file ``ci/requirements/doc.yml``.
-You should also use this environment and these steps if you want to view changes you've made to the docstrings.
-
-.. code-block:: sh
-
-    # Create and activate the docs environment
-    conda env create -f ci/requirements/doc.yml
-    conda activate xarray-docs
-
-    # or with older versions of Anaconda:
-    source activate xarray-docs
-
-    # Build and install a local, editable version of xarray
-    pip install -e .
-
-Building the documentation
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To build the documentation run::
-
-    cd doc/
-    make html
-
-Then you can find the HTML output files in the folder ``xarray/doc/_build/html/``.
-
-To see what the documentation now looks like with your changes, you can view the HTML build locally by opening the files in your local browser.
-For example, if you normally use Google Chrome as your browser, you could enter::
-
-    google-chrome _build/html/quick-overview.html
-
-in the terminal, running from within the ``doc/`` folder.
-You should now see a new tab pop open in your local browser showing the ``quick-overview`` page of the documentation.
-The different pages of this local build of the documentation are linked together,
-so you can browse the whole documentation by following links the same way you would on the officially-hosted xarray docs site.
-
-The first time you build the docs, it will take quite a while because it has to run
-all the code examples and build all the generated docstring pages. In subsequent
-invocations, Sphinx will try to build only the pages that have been modified.
-
-If you want to do a full clean build, do::
-
-    make clean
-    make html
-
-Writing ReST pages
-------------------
-
-Most documentation is either in the docstrings of individual classes and methods, in explicit
-``.rst`` files, or in examples and tutorials. All of these use the
-`ReST <https://docutils.sourceforge.io/rst.html>`_ syntax and are processed by
-`Sphinx <https://www.sphinx-doc.org/en/master/>`_.
-
-This section contains additional information and conventions for how ReST is used in the
-xarray documentation.
-
-Section formatting
-~~~~~~~~~~~~~~~~~~
-
-We aim to follow the recommendations from the
-`Python documentation <https://devguide.python.org/documentation/start-documenting/index.html#sections>`_
-and the `Sphinx reStructuredText documentation <https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html#sections>`_
-for section markup characters:
-
-- ``*`` with overline, for chapters
-
-- ``=``, for headings
-
-- ``-``, for sections
-
-- ``~``, for subsections
-
-- ``**text**``, for **bold** text
-
-Referring to other documents and sections
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-`Sphinx  <https://www.sphinx-doc.org/en/master/>`_ allows internal
-`references <https://www.sphinx-doc.org/en/master/usage/restructuredtext/roles.html>`_ between documents.
-
-Documents can be linked with the ``:doc:`` role:
-
-::
-
-    See the :doc:`/getting-started-guide/installing`
-
-    See the :doc:`/getting-started-guide/quick-overview`
-
-will render as:
-
-See the `Installation <https://docs.xarray.dev/en/stable/getting-started-guide/installing.html>`_
-
-See the `Quick Overview <https://docs.xarray.dev/en/stable/getting-started-guide/quick-overview.html>`_
-
-Including figures and files
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Image files can be directly included in pages with the ``image::`` directive.
-
-.. _contributing.code:
-
-Contributing to the code base
-=============================
-
-.. contents:: Code Base:
-   :local:
-
-Code standards
---------------
-
-Writing good code is not just about what you write. It is also about *how* you
-write it. During :ref:`Continuous Integration <contributing.ci>` testing, several
-tools will be run to check your code for stylistic errors.
-Generating any warnings will cause the test to fail.
-Thus, good style is a requirement for submitting code to *xarray*.
-
-In addition, because a lot of people use our library, it is important that we
-avoid sudden changes to the code that could break a lot of user code; that is,
-we need xarray to be as *backwards compatible* as possible to avoid mass breakage.
-
-Code Formatting
-~~~~~~~~~~~~~~~
-
-xarray uses several tools to ensure a consistent code format throughout the project:
-
-- `ruff <https://github.com/astral-sh/ruff>`_ for formatting, code quality checks, and import sorting,
-- `absolufy-imports <https://github.com/MarcoGorelli/absolufy-imports>`_ for enforcing absolute instead of relative imports,
-- `mypy <https://mypy-lang.org/>`_ for static type checking on `type hints
-  <https://docs.python.org/3/library/typing.html>`_.
-
-We highly recommend that you set up `pre-commit hooks <https://pre-commit.com/>`_
-to automatically run all the above tools every time you make a git commit. This
-can be done by running::
-
-   pre-commit install
-
-from the root of the xarray repository. You can skip the pre-commit checks
-with ``git commit --no-verify``.
-
-
-Backwards Compatibility
-~~~~~~~~~~~~~~~~~~~~~~~
-
-Please try to maintain backwards compatibility. *xarray* has a growing number of users with
-lots of existing code, so don't break it if at all possible.  If you think breakage is
-required, clearly state why as part of the pull request.
-
-Be especially careful when changing function and method signatures, because any change
-may require a deprecation warning. For example, if your pull request means that the
-argument ``old_arg`` to ``func`` is no longer valid, then instead of simply raising an error
-when a user passes ``old_arg``, we would catch it and warn:
-
-.. code-block:: python
-
-    def func(new_arg, old_arg=None):
-        if old_arg is not None:
-            from xarray.core.utils import emit_user_level_warning
-
-            emit_user_level_warning(
-                "`old_arg` has been deprecated, and in the future will raise an error."
-                "Please use `new_arg` from now on.",
-                DeprecationWarning,
-            )
-
-            # Still do what the user intended here
-
-This temporary check would then be removed in a subsequent version of xarray.
-This process of first warning users before actually breaking their code is known as a
-"deprecation cycle", and makes changes significantly easier to handle both for users
-of xarray, and for developers of other libraries that depend on xarray.
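-
-It is also worth adding a test that exercises the deprecated code path, so that the
-warning demonstrably reaches users. A minimal sketch (assuming the ``func`` defined
-above is importable from the module under test) might look like:
-
-.. code-block:: python
-
-    import pytest
-
-
-    def test_old_arg_warns():
-        # Passing the deprecated argument should warn, while still doing
-        # what the user intended.
-        with pytest.warns(DeprecationWarning, match="old_arg"):
-            func(new_arg=1, old_arg=2)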
-
-
-.. _contributing.ci:
-
-Testing With Continuous Integration
------------------------------------
-
-The *xarray* test suite runs automatically via the
-`GitHub Actions <https://docs.github.com/en/free-pro-team@latest/actions>`__
-continuous integration service once your pull request is submitted.
-
-A pull request will be considered for merging when the build is all 'green'. If any
-tests are failing, then you will get a red 'X', where you can click through to see the
-individual failed tests. This is an example of a green build.
-
-.. image:: _static/ci.png
-
-.. note::
-
-   Each time you push to your PR branch, a new run of the tests will be
-   triggered on the CI. If they haven't already finished, tests for any older
-   commits on the same branch will be automatically cancelled.
-
-.. _contributing.tdd:
-
-
-Test-driven development/code writing
-------------------------------------
-
-*xarray* is serious about testing and strongly encourages contributors to embrace
-`test-driven development (TDD) <https://en.wikipedia.org/wiki/Test-driven_development>`_.
-This development process "relies on the repetition of a very short development cycle:
-first the developer writes an (initially failing) automated test case that defines a desired
-improvement or new function, then produces the minimum amount of code to pass that test."
-So, before actually writing any code, you should write your tests.  Often the test can be
-taken from the original GitHub issue.  However, it is always worth considering additional
-use cases and writing corresponding tests.
-
-Adding tests is one of the most common requests after code is pushed to *xarray*.  Therefore,
-it is worth getting in the habit of writing tests ahead of time so that this is never an issue.
-
-Like many packages, *xarray* uses `pytest
-<https://doc.pytest.org/en/latest/>`_ and the convenient
-extensions in `numpy.testing
-<https://numpy.org/doc/stable/reference/routines.testing.html>`_.
-
-Writing tests
-~~~~~~~~~~~~~
-
-All tests should go into the ``tests`` subdirectory of the specific package.
-This folder contains many current examples of tests, and we suggest looking to these for
-inspiration.
-
-The ``xarray.testing`` module has many special ``assert`` functions that
-make it easier to make statements about whether DataArray or Dataset objects are
-equivalent. The easiest way to verify that your code is correct is to
-explicitly construct the result you expect, then compare the actual result to
-the expected correct result::
-
-    def test_constructor_from_0d():
-        expected = Dataset({None: ([], 0)})[None]
-        actual = DataArray(0)
-        assert_identical(expected, actual)
-
-Transitioning to ``pytest``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-*xarray*'s existing test structure is *mostly* class-based, meaning that you will
-typically find tests wrapped in a class.
-
-.. code-block:: python
-
-    class TestReallyCoolFeature: ...
-
-Going forward, we are moving to a more *functional* style using the
-`pytest <https://doc.pytest.org/en/latest/>`__ framework, which offers a richer
-set of features that facilitates testing and development. Thus, instead of
-writing test classes, we will write test functions like this:
-
-.. code-block:: python
-
-    def test_really_cool_feature(): ...
-
-Using ``pytest``
-~~~~~~~~~~~~~~~~
-
-Here is an example of a self-contained set of tests that illustrate multiple
-features that we like to use.
-
-- functional style: tests are like ``test_*`` and *only* take arguments that are either
-  fixtures or parameters
-- ``pytest.mark`` can be used to set metadata on test functions, e.g. ``skip`` or ``xfail``.
-- using ``parametrize``: allow testing of multiple cases
-- to set a mark on a parameter, ``pytest.param(..., marks=...)`` syntax should be used
-- ``fixture``, code for object construction, on a per-test basis
-- using bare ``assert`` for scalars and truth-testing
-- ``assert_equal`` and ``assert_identical`` from the ``xarray.testing`` module for xarray object comparisons.
-- the typical pattern of constructing an ``expected`` and comparing versus the ``result``
-
-We would name this file ``test_cool_feature.py`` and put it in an appropriate place in the
-``xarray/tests/`` structure.
-
-.. code-block:: python
-
-    import pytest
-    import numpy as np
-    import xarray as xr
-    from xarray.testing import assert_equal
-
-
-    @pytest.mark.parametrize("dtype", ["int8", "int16", "int32", "int64"])
-    def test_dtypes(dtype):
-        assert str(np.dtype(dtype)) == dtype
-
-
-    @pytest.mark.parametrize(
-        "dtype",
-        [
-            "float32",
-            pytest.param("int16", marks=pytest.mark.skip),
-            pytest.param(
-                "int32", marks=pytest.mark.xfail(reason="to show how it works")
-            ),
-        ],
-    )
-    def test_mark(dtype):
-        assert str(np.dtype(dtype)) == "float32"
-
-
-    @pytest.fixture
-    def dataarray():
-        return xr.DataArray([1, 2, 3])
-
-
-    @pytest.fixture(params=["int8", "int16", "int32", "int64"])
-    def dtype(request):
-        return request.param
-
-
-    def test_series(dataarray, dtype):
-        result = dataarray.astype(dtype)
-        assert result.dtype == dtype
-
-        expected = xr.DataArray(np.array([1, 2, 3], dtype=dtype))
-        assert_equal(result, expected)
-
-
-
-A test run of this yields
-
-.. code-block:: shell
-
-    (xarray) $ pytest test_cool_feature.py -v
-    ================================= test session starts ==================================
-    platform darwin -- Python 3.10.6, pytest-7.2.0, pluggy-1.0.0 --
-    cachedir: .pytest_cache
-    plugins: hypothesis-6.56.3, cov-4.0.0
-    collected 11 items
-
-    xarray/tests/test_cool_feature.py::test_dtypes[int8] PASSED                       [  9%]
-    xarray/tests/test_cool_feature.py::test_dtypes[int16] PASSED                      [ 18%]
-    xarray/tests/test_cool_feature.py::test_dtypes[int32] PASSED                      [ 27%]
-    xarray/tests/test_cool_feature.py::test_dtypes[int64] PASSED                      [ 36%]
-    xarray/tests/test_cool_feature.py::test_mark[float32] PASSED                      [ 45%]
-    xarray/tests/test_cool_feature.py::test_mark[int16] SKIPPED (unconditional skip)  [ 54%]
-    xarray/tests/test_cool_feature.py::test_mark[int32] XFAIL (to show how it works)  [ 63%]
-    xarray/tests/test_cool_feature.py::test_series[int8] PASSED                       [ 72%]
-    xarray/tests/test_cool_feature.py::test_series[int16] PASSED                      [ 81%]
-    xarray/tests/test_cool_feature.py::test_series[int32] PASSED                      [ 90%]
-    xarray/tests/test_cool_feature.py::test_series[int64] PASSED                      [100%]
-
-
-    ==================== 9 passed, 1 skipped, 1 xfailed in 1.83 seconds ====================
-
-Tests that we have ``parametrized`` are now accessible via the test name. For
-example, we could run these with ``-k int8`` to sub-select *only* those tests
-which match ``int8``.
-
-
-.. code-block:: shell
-
-   (xarray) bash-3.2$ pytest test_cool_feature.py -v -k int8
-   ================================== test session starts ==================================
-   platform darwin -- Python 3.10.6, pytest-7.2.0, pluggy-1.0.0 --
-   cachedir: .pytest_cache
-   plugins: hypothesis-6.56.3, cov-4.0.0
-   collected 11 items
-
-   test_cool_feature.py::test_dtypes[int8] PASSED
-   test_cool_feature.py::test_series[int8] PASSED
-
-
-Running the test suite
-----------------------
-
-The tests can then be run directly inside your Git clone (without having to
-install *xarray*) by typing::
-
-    pytest xarray
-
-The test suite is exhaustive and takes a few minutes.  It is often worth
-first running only a subset of tests around your changes before running the
-entire suite.
-
-The easiest way to do this is with::
-
-    pytest xarray/path/to/test.py -k regex_matching_test_name
-
-Or with one of the following constructs::
-
-    pytest xarray/tests/[test-module].py
-    pytest xarray/tests/[test-module].py::[TestClass]
-    pytest xarray/tests/[test-module].py::[TestClass]::[test_method]
-
-Using `pytest-xdist <https://pypi.python.org/pypi/pytest-xdist>`_, one can
-speed up local testing on multicore machines by running pytest with the optional ``-n`` argument::
-
-    pytest xarray -n 4
-
-This can significantly reduce the time it takes to locally run tests before
-submitting a pull request.
-
-For more, see the `pytest <https://doc.pytest.org/en/latest/>`_ documentation.
-
-Running the performance test suite
-----------------------------------
-
-Performance matters and it is worth considering whether your code has introduced
-performance regressions.  *xarray* is starting to write a suite of benchmarking tests
-using `asv <https://github.com/airspeed-velocity/asv>`__
-to enable easy monitoring of the performance of critical *xarray* operations.
-These benchmarks are all found in the ``xarray/asv_bench`` directory.
-
-To use all features of asv, you will need either ``conda`` or
-``virtualenv``. For more details please check the `asv installation
-webpage <https://asv.readthedocs.io/en/stable/installing.html>`_.
-
-To install asv::
-
-    python -m pip install asv
-
-If you need to run a benchmark, change your directory to ``asv_bench/`` and run::
-
-    asv continuous -f 1.1 upstream/main HEAD
-
-You can replace ``HEAD`` with the name of the branch you are working on.
-The command reports benchmarks that changed by more than 10% (the ``-f 1.1`` threshold factor).
-The command uses ``conda`` by default for creating the benchmark
-environments. If you want to use virtualenv instead, write::
-
-    asv continuous -f 1.1 -E virtualenv upstream/main HEAD
-
-The ``-E virtualenv`` option should be added to all ``asv`` commands
-that run benchmarks. The default value is defined in ``asv.conf.json``.
-
-Running the full benchmark suite can take up to one hour and use up a few GBs of RAM.
-Usually it is sufficient to paste only a subset of the results into the pull
-request to show that the committed changes do not cause unexpected performance
-regressions.  You can run specific benchmarks using the ``-b`` flag, which
-takes a regular expression.  For example, this will only run benchmarks from the
-``xarray/asv_bench/benchmarks/groupby.py`` file::
-
-    asv continuous -f 1.1 upstream/main HEAD -b ^groupby
-
-If you want to only run a specific group of tests from a file, you can do it
-using ``.`` as a separator. For example::
-
-    asv continuous -f 1.1 upstream/main HEAD -b groupby.GroupByMethods
-
-will only run the ``GroupByMethods`` benchmark defined in ``groupby.py``.
-
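-For orientation, an ``asv`` benchmark is an ordinary Python class: ``setup`` runs before
-each benchmark and every method whose name starts with ``time_`` is timed. The sketch
-below is hypothetical and is not the actual contents of ``groupby.py``:
-
-.. code-block:: python
-
-    import numpy as np
-    import xarray as xr
-
-
-    class GroupByMethods:
-        def setup(self):
-            # Build the dataset up front; setup time is not included in the timing.
-            self.ds = xr.Dataset(
-                {"a": ("x", np.random.randn(10_000))},
-                coords={"labels": ("x", np.random.randint(0, 10, 10_000))},
-            )
-
-        def time_groupby_mean(self):
-            self.ds.groupby("labels").mean()
-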
-You can also run the benchmark suite using the version of *xarray*
-already installed in your current Python environment. This can be
-useful if you do not have ``virtualenv`` or ``conda``, or are using the
-editable install (``pip install -e .``) approach discussed above; for the in-place build
-you need to set ``PYTHONPATH``, e.g.
-``PYTHONPATH="$PWD/.." asv [remaining arguments]``.
-You can run benchmarks using an existing Python
-environment by::
-
-    asv run -e -E existing
-
-or, to use a specific Python interpreter,::
-
-    asv run -e -E existing:python3.10
-
-This will display stderr from the benchmarks, and use your local
-``python`` that comes from your ``$PATH``.
-
-Learn `how to write a benchmark and how to use asv from the documentation <https://asv.readthedocs.io/en/latest/writing_benchmarks.html>`_.
-
-
-..
-   TODO: uncomment once we have a working setup
-         see https://github.com/pydata/xarray/pull/5066
-
-   The *xarray* benchmarking suite is run remotely and the results are
-   available `here <https://pandas.pydata.org/speed/xarray/>`_.
-
-Documenting your code
----------------------
-
-Changes should be reflected in the release notes located in ``doc/whats-new.rst``.
-This file contains an ongoing change log for each release.  Add an entry to this file to
-document your fix, enhancement or (unavoidable) breaking change.  Make sure to include the
-GitHub issue number when adding your entry (using ``:issue:`1234```, where ``1234`` is the
-issue/pull request number).
-
-If your code is an enhancement, it is most likely necessary to add usage
-examples to the existing documentation.  This can be done by following the :ref:`guidelines for contributing to the documentation <contributing.documentation>`.
-
-.. _contributing.changes:
-
-Contributing your changes to *xarray*
-=====================================
-
-.. _contributing.committing:
-
-Committing your code
---------------------
-
-Keep style fixes to a separate commit to make your pull request more readable.
-
-Once you've made changes, you can see them by typing::
-
-    git status
-
-If you have created a new file, it is not being tracked by git. Add it by typing::
-
-    git add path/to/file-to-be-added.py
-
-Running ``git status`` again should give something like::
-
-    # On branch shiny-new-feature
-    #
-    #       modified:   /relative/path/to/file-you-added.py
-    #
-
-The following defines how a commit message should ideally be structured:
-
-* A subject line with ``< 72`` chars.
-* One blank line.
-* Optionally, a commit message body.
-
-Please reference the relevant GitHub issues in your commit message using ``GH1234`` or
-``#1234``.  Either style is fine, but the former is generally preferred.
-
-Now you can commit your changes in your local repository::
-
-    git commit -m "your commit message here"
-
-
-.. _contributing.pushing:
-
-Pushing your changes
---------------------
-
-When you want your changes to appear publicly on your GitHub page, push your
-forked feature branch's commits::
-
-    git push origin shiny-new-feature
-
-Here ``origin`` is the default name given to your remote repository on GitHub.
-You can see the remote repositories::
-
-    git remote -v
-
-If you added the upstream repository as described above you will see something
-like::
-
-    origin  git@github.com:yourname/xarray.git (fetch)
-    origin  git@github.com:yourname/xarray.git (push)
-    upstream        git://github.com/pydata/xarray.git (fetch)
-    upstream        git://github.com/pydata/xarray.git (push)
-
-Now your code is on GitHub, but it is not yet a part of the *xarray* project.  For that to
-happen, a pull request needs to be submitted on GitHub.
-
-.. _contributing.review:
-
-Review your code
-----------------
-
-When you're ready to ask for a code review, file a pull request. Before you do, once
-again make sure that you have followed all the guidelines outlined in this document
-regarding code style, tests, performance tests, and documentation. You should also
-double check your branch changes against the branch it was based on:
-
-#. Navigate to your repository on GitHub -- https://github.com/your-user-name/xarray
-#. Click on ``Branches``
-#. Click on the ``Compare`` button for your feature branch
-#. Select the ``base`` and ``compare`` branches, if necessary. This will be ``main`` and
-   ``shiny-new-feature``, respectively.
-
-.. _contributing.pr:
-
-Finally, make the pull request
-------------------------------
-
-If everything looks good, you are ready to make a pull request.  A pull request is how
-code from a local repository becomes available to the GitHub community and can be looked
-at and eventually merged into the ``main`` version.  This pull request and its associated
-changes will eventually be committed to the ``main`` branch and available in the next
-release.  To submit a pull request:
-
-#. Navigate to your repository on GitHub
-#. Click on the ``Pull Request`` button
-#. You can then click on ``Commits`` and ``Files Changed`` to make sure everything looks
-   okay one last time
-#. Write a description of your changes in the ``Preview Discussion`` tab
-#. Click ``Send Pull Request``.
-
-This request then goes to the repository maintainers, and they will review
-the code.
-
-If you have made updates to the documentation, you can now see a preview of the updated docs by clicking on "Details" under
-the ``docs/readthedocs.org`` check near the bottom of the list of checks that run automatically when submitting a PR,
-then clicking on the "View Docs" button on the right (not the big green button, the small black one further down).
-
-.. image:: _static/view-docs.png
-
-
-If you need to make more changes, you can make them in
-your branch, add them to a new commit, push them to GitHub, and the pull request
-will automatically be updated.  Pushing them to GitHub again is done by::
-
-    git push origin shiny-new-feature
-
-This will automatically update your pull request with the latest code and restart the
-:ref:`Continuous Integration <contributing.ci>` tests.
-
-
-.. _contributing.delete:
-
-Delete your merged branch (optional)
-------------------------------------
-
-Once your feature branch is accepted into upstream, you'll probably want to get rid of
-the branch. First, update your ``main`` branch to check that the merge was successful::
-
-    git fetch upstream
-    git checkout main
-    git merge upstream/main
-
-Then you can do::
-
-    git branch -D shiny-new-feature
-
-You need to use an upper-case ``-D`` because the branch was squashed into a
-single commit before merging. Be careful with this because ``git`` won't warn
-you if you accidentally delete an unmerged branch.
-
-If you didn't delete your branch using GitHub's interface, then it will still exist on
-GitHub. To delete it there do::
-
-    git push origin --delete shiny-new-feature
-
-
-.. _contributing.checklist:
-
-PR checklist
-------------
-
-- **Properly comment and document your code.** See `"Documenting your code" <https://docs.xarray.dev/en/stable/contributing.html#documenting-your-code>`_.
-- **Test that the documentation builds correctly** by typing ``make html`` in the ``doc`` directory. This is not strictly necessary, but this may be easier than waiting for CI to catch a mistake. See `"Contributing to the documentation" <https://docs.xarray.dev/en/stable/contributing.html#contributing-to-the-documentation>`_.
-- **Test your code**.
-
-  - Write new tests if needed. See `"Test-driven development/code writing" <https://docs.xarray.dev/en/stable/contributing.html#test-driven-development-code-writing>`_.
-  - Test the code using `Pytest <https://doc.pytest.org/en/latest/>`_. Running all tests (type ``pytest`` in the root directory) takes a while, so feel free to only run the tests you think are needed based on your PR (example: ``pytest xarray/tests/test_dataarray.py``). CI will catch any failing tests.
-  - By default, the upstream dev CI is disabled on pull request and push events. You can override this behavior per commit by adding a ``[test-upstream]`` tag to the first line of the commit message. For documentation-only commits, you can skip the CI per commit by adding a ``[skip-ci]`` tag to the first line of the commit message.
-
-- **Properly format your code** and verify that it passes the formatting guidelines set by `ruff <https://github.com/astral-sh/ruff>`_. See `"Code formatting" <https://docs.xarray.dev/en/stable/contributing.html#code-formatting>`_. You can use `pre-commit <https://pre-commit.com/>`_ to run these automatically on each commit.
-
-  - Run ``pre-commit run --all-files`` in the root directory. This may modify some files. Confirm and commit any formatting changes.
-
-- **Push your code** and `create a PR on GitHub <https://help.github.com/en/articles/creating-a-pull-request>`_.
-- **Use a helpful title for your pull request** by summarizing the main contributions rather than using the latest commit message. If the PR addresses an `issue <https://github.com/pydata/xarray/issues>`_, please `reference it <https://help.github.com/en/articles/autolinked-references-and-urls>`_.
diff -pruN 2025.03.1-8/doc/developers-meeting.rst 2025.10.1-1/doc/developers-meeting.rst
--- 2025.03.1-8/doc/developers-meeting.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/developers-meeting.rst	1970-01-01 00:00:00.000000000 +0000
@@ -1,20 +0,0 @@
-Developers meeting
-------------------
-
-Xarray developers meet every other Wednesday.
-
-The meeting occurs on `Zoom <https://us02web.zoom.us/j/87503265754?pwd=cEFJMzFqdTFaS3BMdkx4UkNZRk1QZz09>`__.
-
-Find the `notes for the meeting here <https://hackmd.io/LFOk5e8BSnqjX3QiKWy5Mw/edit>`__.
-
-There is a :issue:`GitHub issue for changes to the meeting<4001>`.
-
-You can subscribe to this calendar to be notified of changes:
-
-* `Google Calendar <https://calendar.google.com/calendar/embed?src=59589f9634ab4ef304e8209be66cda9812dababca71eb8a01a6fa2d167f90d94%40group.calendar.google.com&ctz=America%2FLos_Angeles>`__
-* `iCal <https://calendar.google.com/calendar/ical/59589f9634ab4ef304e8209be66cda9812dababca71eb8a01a6fa2d167f90d94%40group.calendar.google.com/public/basic.ics>`__
-
-.. raw:: html
-
-   <iframe src="https://calendar.google.com/calendar/embed?src=59589f9634ab4ef304e8209be66cda9812dababca71eb8a01a6fa2d167f90d94%40group.calendar.google.com&ctz=America%2FLos_Angeles" style="border: 0" width="800" height="600" frameborder="0" scrolling="no"></iframe>
-   <script>document.getElementById("calendariframe").src = document.getElementById("calendariframe").src.replace("ctz=local", "ctz=" + Intl.DateTimeFormat().resolvedOptions().timeZone)</script>
diff -pruN 2025.03.1-8/doc/ecosystem.rst 2025.10.1-1/doc/ecosystem.rst
--- 2025.03.1-8/doc/ecosystem.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/ecosystem.rst	1970-01-01 00:00:00.000000000 +0000
@@ -1,107 +0,0 @@
-.. _ecosystem:
-
-Xarray related projects
------------------------
-
-Below is a list of existing open source projects that build
-functionality upon xarray. See also section :ref:`internals` for more
-details on how to build xarray extensions. We also maintain the
-`xarray-contrib <https://github.com/xarray-contrib>`_ GitHub organization
-as a place to curate projects that build upon xarray.
-
-Geosciences
-~~~~~~~~~~~
-
-- `aospy <https://aospy.readthedocs.io>`_: Automated analysis and management of gridded climate data.
-- `argopy <https://github.com/euroargodev/argopy>`_: xarray-based Argo data access, manipulation and visualisation for standard users as well as Argo experts.
-- `climpred <https://climpred.readthedocs.io>`_: Analysis of ensemble forecast models for climate prediction.
-- `geocube <https://corteva.github.io/geocube>`_: Tool to convert geopandas vector data into rasterized xarray data.
-- `GeoWombat <https://github.com/jgrss/geowombat>`_: Utilities for analysis of remotely sensed and gridded raster data at scale (easily tame Landsat, Sentinel, Quickbird, and PlanetScope).
-- `grib2io <https://github.com/NOAA-MDL/grib2io>`_: Utility to work with GRIB2 files including an xarray backend, DASK support for parallel reading in open_mfdataset, lazy loading of data, editing of GRIB2 attributes and GRIB2IO DataArray attrs, and spatial interpolation and reprojection of GRIB2 messages and GRIB2IO Datasets/DataArrays for both grid to grid and grid to stations.
-- `gsw-xarray <https://github.com/DocOtak/gsw-xarray>`_: a wrapper around `gsw <https://teos-10.github.io/GSW-Python>`_ that adds CF-compliant attributes (e.g. standard names and units) when possible.
-- `infinite-diff <https://github.com/spencerahill/infinite-diff>`_: xarray-based finite-differencing, focused on gridded climate/meteorology data
-- `marc_analysis <https://github.com/darothen/marc_analysis>`_: Analysis package for CESM/MARC experiments and output.
-- `MetPy <https://unidata.github.io/MetPy/dev/index.html>`_: A collection of tools in Python for reading, visualizing, and performing calculations with weather data.
-- `MPAS-Analysis <https://mpas-dev.github.io/MPAS-Analysis>`_: Analysis for simulations produced with Model for Prediction Across Scales (MPAS) components and the Accelerated Climate Model for Energy (ACME).
-- `OGGM <https://oggm.org/>`_: Open Global Glacier Model
-- `Oocgcm <https://oocgcm.readthedocs.io/>`_: Analysis of large gridded geophysical datasets
-- `Open Data Cube <https://www.opendatacube.org/>`_: Analysis toolkit of continental scale Earth Observation data from satellites.
-- `Pangaea <https://pangaea.readthedocs.io/en/latest/>`_: xarray extension for gridded land surface & weather model output.
-- `Pangeo <https://pangeo.io>`_: A community effort for big data geoscience in the cloud.
-- `PyGDX <https://pygdx.readthedocs.io/en/latest/>`_: Python 3 package for
-  accessing data stored in GAMS Data eXchange (GDX) files. Also uses a custom
-  subclass.
-- `pyinterp <https://pangeo-pyinterp.readthedocs.io/en/latest/>`_: Python 3 package for interpolating geo-referenced data used in the field of geosciences.
-- `pyXpcm <https://pyxpcm.readthedocs.io>`_: xarray-based Profile Classification Modelling (PCM), mostly for ocean data.
-- `Regionmask <https://regionmask.readthedocs.io/>`_: plotting and creation of masks of spatial regions
-- `rioxarray <https://corteva.github.io/rioxarray>`_: geospatial xarray extension powered by rasterio
-- `salem <https://salem.readthedocs.io>`_: Adds geolocalised subsetting, masking, and plotting operations to xarray's data structures via accessors.
-- `SatPy <https://satpy.readthedocs.io/>`_ : Library for reading and manipulating meteorological remote sensing data and writing it to various image and data file formats.
-- `SARXarray <https://tudelftgeodesy.github.io/sarxarray/>`_: xarray extension for reading and processing large Synthetic Aperture Radar (SAR) data stacks.
-- `shxarray <https://shxarray.wobbly.earth/>`_: Convert, filter, and map geodesy related spherical harmonic representations of gravity and terrestrial water storage through an xarray extension.
-- `Spyfit <https://spyfit.readthedocs.io/en/master/>`_: FTIR spectroscopy of the atmosphere
-- `windspharm <https://ajdawson.github.io/windspharm/index.html>`_: Spherical
-  harmonic wind analysis in Python.
-- `wradlib <https://wradlib.org/>`_: An Open Source Library for Weather Radar Data Processing.
-- `wrf-python <https://wrf-python.readthedocs.io/>`_: A collection of diagnostic and interpolation routines for use with output of the Weather Research and Forecasting (WRF-ARW) Model.
-- `xarray-regrid <https://github.com/EXCITED-CO2/xarray-regrid>`_: xarray extension for regridding rectilinear data.
-- `xarray-simlab <https://xarray-simlab.readthedocs.io>`_: xarray extension for computer model simulations.
-- `xarray-spatial <https://xarray-spatial.org/>`_: Numba-accelerated raster-based spatial processing tools (NDVI, curvature, zonal-statistics, proximity, hillshading, viewshed, etc.)
-- `xarray-topo <https://xarray-topo.readthedocs.io/>`_: xarray extension for topographic analysis and modelling.
-- `xbpch <https://github.com/darothen/xbpch>`_: xarray interface for bpch files.
-- `xCDAT <https://xcdat.readthedocs.io/>`_: An extension of xarray for climate data analysis on structured grids.
-- `xclim <https://xclim.readthedocs.io/>`_: A library for calculating climate science indices with unit handling built from xarray and dask.
-- `xESMF <https://pangeo-xesmf.readthedocs.io/>`_: Universal regridder for geospatial data.
-- `xgcm <https://xgcm.readthedocs.io/>`_: Extends the xarray data model to understand finite volume grid cells (common in General Circulation Models) and provides interpolation and difference operations for such grids.
-- `xmitgcm <https://xmitgcm.readthedocs.io/>`_: a python package for reading `MITgcm <https://mitgcm.org/>`_ binary MDS files into xarray data structures.
-- `xnemogcm <https://github.com/rcaneill/xnemogcm/>`_: a package to read `NEMO <https://nemo-ocean.eu/>`_ output files and add attributes to interface with xgcm.
-
-Machine Learning
-~~~~~~~~~~~~~~~~
-- `ArviZ <https://arviz-devs.github.io/arviz/>`_: Exploratory analysis of Bayesian models, built on top of xarray.
-- `Darts <https://github.com/unit8co/darts/>`_: User-friendly modern machine learning for time series in Python.
-- `Elm <https://ensemble-learning-models.readthedocs.io>`_: Parallel machine learning on xarray data structures
-- `sklearn-xarray (1) <https://phausamann.github.io/sklearn-xarray>`_: Combines scikit-learn and xarray (1).
-- `sklearn-xarray (2) <https://sklearn-xarray.readthedocs.io/en/latest/>`_: Combines scikit-learn and xarray (2).
-- `xbatcher <https://xbatcher.readthedocs.io>`_: Batch Generation from Xarray Datasets.
-
-Other domains
-~~~~~~~~~~~~~
-- `ptsa <https://pennmem.github.io/ptsa/html/index.html>`_: EEG Time Series Analysis
-- `pycalphad <https://pycalphad.org/docs/latest/>`_: Computational Thermodynamics in Python
-- `pyomeca <https://pyomeca.github.io/>`_: Python framework for biomechanical analysis
-
-Extend xarray capabilities
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-- `Collocate <https://github.com/cistools/collocate>`_: Collocate xarray trajectories in arbitrary physical dimensions
-- `eofs <https://ajdawson.github.io/eofs/>`_: EOF analysis in Python.
-- `hypothesis-gufunc <https://hypothesis-gufunc.readthedocs.io/en/latest/>`_: Extension to hypothesis. Makes it easy to write unit tests with xarray objects as input.
-- `ntv-pandas <https://github.com/loco-philippe/ntv-pandas>`_ : A tabular analyzer and a semantic, compact and reversible converter for multidimensional and tabular data
-- `nxarray <https://github.com/nxarray/nxarray>`_: NeXus input/output capability for xarray.
-- `xarray-compare <https://github.com/astropenguin/xarray-compare>`_: xarray extension for data comparison.
-- `xarray-dataclasses <https://github.com/astropenguin/xarray-dataclasses>`_: xarray extension for typed DataArray and Dataset creation.
-- `xarray_einstats <https://xarray-einstats.readthedocs.io>`_: Statistics, linear algebra and einops for xarray
-- `xarray_extras <https://github.com/crusaderky/xarray_extras>`_: Advanced algorithms for xarray objects (e.g. integrations/interpolations).
-- `xeofs <https://github.com/nicrie/xeofs>`_: PCA/EOF analysis and related techniques, integrated with xarray and Dask for efficient handling of large-scale data.
-- `xpublish <https://xpublish.readthedocs.io/>`_: Publish Xarray Datasets via a Zarr compatible REST API.
-- `xrft <https://github.com/rabernat/xrft>`_: Fourier transforms for xarray data.
-- `xr-scipy <https://xr-scipy.readthedocs.io>`_: A lightweight scipy wrapper for xarray.
-- `X-regression <https://github.com/kuchaale/X-regression>`_: Multiple linear regression from Statsmodels library coupled with Xarray library.
-- `xskillscore <https://github.com/xarray-contrib/xskillscore>`_: Metrics for verifying forecasts.
-- `xyzpy <https://xyzpy.readthedocs.io>`_: Easily generate high dimensional data, including parallelization.
-
-Visualization
-~~~~~~~~~~~~~
-- `datashader <https://datashader.org>`_, `geoviews <https://geoviews.org>`_, `holoviews <https://holoviews.org/>`_: visualization packages for large data.
-- `hvplot <https://hvplot.pyviz.org/>`_ : A high-level plotting API for the PyData ecosystem built on HoloViews.
-- `psyplot <https://psyplot.readthedocs.io>`_: Interactive data visualization with python.
-- `xarray-leaflet <https://github.com/davidbrochart/xarray_leaflet>`_: An xarray extension for tiled map plotting based on ipyleaflet.
-- `xtrude <https://github.com/davidbrochart/xtrude>`_: An xarray extension for 3D terrain visualization based on pydeck.
-- `pyvista-xarray <https://github.com/pyvista/pyvista-xarray>`_: xarray DataArray accessor for 3D visualization with `PyVista <https://github.com/pyvista/pyvista>`_ and DataSet engines for reading VTK data formats.
-
-Non-Python projects
-~~~~~~~~~~~~~~~~~~~
-- `xframe <https://github.com/xtensor-stack/xframe>`_: C++ data structures inspired by xarray.
-- `AxisArrays <https://github.com/JuliaArrays/AxisArrays.jl>`_, `NamedArrays <https://github.com/davidavdav/NamedArrays.jl>`_ and `YAXArrays.jl <https://github.com/JuliaDataCubes/YAXArrays.jl>`_: similar data structures for Julia.
-
-More projects can be found at the `"xarray" Github topic <https://github.com/topics/xarray>`_.
diff -pruN 2025.03.1-8/doc/examples/ERA5-GRIB-example.ipynb 2025.10.1-1/doc/examples/ERA5-GRIB-example.ipynb
--- 2025.03.1-8/doc/examples/ERA5-GRIB-example.ipynb	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/examples/ERA5-GRIB-example.ipynb	2025-10-10 10:38:05.000000000 +0000
@@ -21,7 +21,8 @@
    "outputs": [],
    "source": [
     "import xarray as xr\n",
-    "import matplotlib.pyplot as plt"
+    "import matplotlib.pyplot as plt\n",
+    "%matplotlib inline"
    ]
   },
   {
diff -pruN 2025.03.1-8/doc/examples/ROMS_ocean_model.ipynb 2025.10.1-1/doc/examples/ROMS_ocean_model.ipynb
--- 2025.03.1-8/doc/examples/ROMS_ocean_model.ipynb	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/examples/ROMS_ocean_model.ipynb	2025-10-10 10:38:05.000000000 +0000
@@ -87,7 +87,7 @@
    "source": [
     "### Add a lazilly calculated vertical coordinates\n",
     "\n",
-    "Write equations to calculate the vertical coordinate. These will be only evaluated when data is requested. Information about the ROMS vertical coordinate can be found (here)[https://www.myroms.org/wiki/Vertical_S-coordinate]\n",
+    "Write equations to calculate the vertical coordinate. These will be only evaluated when data is requested. Information about the ROMS vertical coordinate can be found [here](https://www.myroms.org/wiki/Vertical_S-coordinate).\n",
     "\n",
     "In short, for `Vtransform==2` as used in this example, \n",
     "\n",
Binary files 2025.03.1-8/doc/examples/monthly_means_output.png and 2025.10.1-1/doc/examples/monthly_means_output.png differ
diff -pruN 2025.03.1-8/doc/examples/multidimensional-coords.ipynb 2025.10.1-1/doc/examples/multidimensional-coords.ipynb
--- 2025.03.1-8/doc/examples/multidimensional-coords.ipynb	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/examples/multidimensional-coords.ipynb	2025-10-10 10:38:05.000000000 +0000
@@ -126,7 +126,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "In order to visualize the data on a conventional latitude-longitude grid, we can take advantage of xarray's ability to apply [cartopy](https://scitools.org.uk/cartopy/docs/latest/) map projections."
+    "In order to visualize the data on a conventional latitude-longitude grid, we can take advantage of xarray's ability to apply [cartopy](https://cartopy.readthedocs.io/stable/) map projections."
    ]
   },
   {
diff -pruN 2025.03.1-8/doc/examples/weather-data.ipynb 2025.10.1-1/doc/examples/weather-data.ipynb
--- 2025.03.1-8/doc/examples/weather-data.ipynb	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/examples/weather-data.ipynb	2025-10-10 10:38:05.000000000 +0000
@@ -13,6 +13,20 @@
   {
    "cell_type": "code",
    "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "import pandas as pd\n",
+    "import seaborn as sns\n",
+    "\n",
+    "import xarray as xr\n",
+    "%matplotlib inline"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
    "metadata": {
     "ExecuteTime": {
      "end_time": "2020-01-27T15:43:36.127628Z",
@@ -21,12 +35,6 @@
    },
    "outputs": [],
    "source": [
-    "import numpy as np\n",
-    "import pandas as pd\n",
-    "import seaborn as sns\n",
-    "\n",
-    "import xarray as xr\n",
-    "\n",
     "np.random.seed(123)\n",
     "\n",
     "xr.set_options(display_style=\"html\")\n",
diff -pruN 2025.03.1-8/doc/gallery.rst 2025.10.1-1/doc/gallery.rst
--- 2025.03.1-8/doc/gallery.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/gallery.rst	2025-10-10 10:38:05.000000000 +0000
@@ -3,7 +3,7 @@ Gallery
 
 Here's a list of examples on how to use xarray. We will be adding more examples soon.
 Contributions are highly welcomed and appreciated. So, if you are interested in contributing, please consult the
-:doc:`contributing` guide.
+:ref:`contributing` guide.
 
 
 
diff -pruN 2025.03.1-8/doc/get-help/faq.rst 2025.10.1-1/doc/get-help/faq.rst
--- 2025.03.1-8/doc/get-help/faq.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/get-help/faq.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,448 @@
+.. _faq:
+
+Frequently Asked Questions
+==========================
+
+.. jupyter-execute::
+    :hide-code:
+
+    import numpy as np
+    import pandas as pd
+    import xarray as xr
+
+    np.random.seed(123456)
+
+
+Your documentation keeps mentioning pandas. What is pandas?
+-----------------------------------------------------------
+
+pandas_ is a very popular data analysis package in Python
+with wide usage in many fields. Our API is heavily inspired by pandas —
+this is why there are so many references to pandas.
+
+.. _pandas: https://pandas.pydata.org
+
+
+Do I need to know pandas to use xarray?
+---------------------------------------
+
+No! Our API is heavily inspired by pandas so while knowing pandas will let you
+become productive more quickly, knowledge of pandas is not necessary to use xarray.
+
+
+Should I use xarray instead of pandas?
+--------------------------------------
+
+It's not an either/or choice! xarray provides robust support for converting
+back and forth between the tabular data-structures of pandas and its own
+multi-dimensional data-structures.
+
+That said, you should only bother with xarray if some aspect of data is
+fundamentally multi-dimensional. If your data is unstructured or
+one-dimensional, pandas is usually the right choice: it has better performance
+for common operations such as ``groupby`` and you'll find far more usage
+examples online.
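+
+As a minimal illustration of going back and forth between the two libraries
+(``pd`` and ``xr`` are imported in the hidden setup cell above):
+
+.. jupyter-execute::
+
+    df = pd.DataFrame({"temperature": [10.2, 11.1, 9.8]}, index=pd.Index([0, 1, 2], name="site"))
+    ds = df.to_xarray()  # pandas -> xarray
+    ds.to_dataframe()  # ... and back again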
+
+
+Why is pandas not enough?
+-------------------------
+
+pandas is a fantastic library for analysis of low-dimensional labelled data -
+if it can be sensibly described as "rows and columns", pandas is probably the
+right choice.  However, sometimes we want to use higher dimensional arrays
+(`ndim > 2`), or arrays for which the order of dimensions (e.g., columns vs
+rows) shouldn't really matter. For example, the images of a movie can be
+natively represented as an array with four dimensions: time, row, column and
+color.
+
+pandas has historically supported N-dimensional panels, but deprecated them in
+version 0.20 in favor of xarray data structures. There are now built-in methods
+on both sides to convert between pandas and xarray, allowing for more focused
+development effort. Xarray objects have a much richer model of dimensionality -
+if you were using Panels:
+
+- You need to create a new factory type for each dimensionality.
+- You can't do math between NDPanels with different dimensionality.
+- Each dimension in an NDPanel has a name (e.g., 'labels', 'items',
+  'major_axis', etc.) but the dimension names refer to order, not their
+  meaning. You can't specify an operation to be applied along the "time"
+  axis.
+- You often have to manually convert collections of pandas arrays
+  (Series, DataFrames, etc) to have the same number of dimensions.
+  In contrast, this sort of data structure fits very naturally in an
+  xarray ``Dataset``.
+
+You can :ref:`read about switching from Panels to xarray here <panel transition>`.
+pandas gets a lot of things right, but many science, engineering and complex
+analytics use cases need fully multi-dimensional data structures.
+
+How do xarray data structures differ from those found in pandas?
+----------------------------------------------------------------
+
+The main distinguishing feature of xarray's ``DataArray`` over labeled arrays in
+pandas is that dimensions can have names (e.g., "time", "latitude",
+"longitude"). Names are much easier to keep track of than axis numbers, and
+xarray uses dimension names for indexing, aggregation and broadcasting. Not only
+can you write ``x.sel(time='2000-01-01')`` and  ``x.mean(dim='time')``, but
+operations like ``x - x.mean(dim='time')`` always work, no matter the order
+of the "time" dimension. You never need to reshape arrays (e.g., with
+``np.newaxis``) to align them for arithmetic operations in xarray.
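+
+As a small illustration (with made-up data), removing a mean along the named "time" dimension works no matter where that dimension sits:
+
+.. code:: python
+
+    import numpy as np
+    import xarray as xr
+
+    x = xr.DataArray(np.random.randn(3, 4), dims=("time", "space"))
+
+    # the same anomalies are computed regardless of dimension order;
+    # no reshaping or axis numbers needed
+    anomaly = x - x.mean(dim="time")
+    anomaly_t = x.transpose("space", "time") - x.mean(dim="time")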
+
+
+Why don't aggregations return Python scalars?
+---------------------------------------------
+
+Xarray tries hard to be self-consistent: operations on a ``DataArray`` (resp.
+``Dataset``) return another ``DataArray`` (resp. ``Dataset``) object. In
+particular, operations returning scalar values (e.g. indexing or aggregations
+like ``mean`` or ``sum`` applied to all axes) will also return xarray objects.
+
+Unfortunately, this means we sometimes have to explicitly cast our results from
+xarray when using them in other libraries. As an illustration, the following
+code fragment
+
+.. jupyter-execute::
+
+    arr = xr.DataArray([1, 2, 3])
+    pd.Series({"x": arr[0], "mean": arr.mean(), "std": arr.std()})
+
+does not yield the pandas Series we expected. We need to specify the type
+conversion ourselves:
+
+.. jupyter-execute::
+
+    pd.Series({"x": arr[0], "mean": arr.mean(), "std": arr.std()}, dtype=float)
+
+Alternatively, we could use the ``item`` method or the ``float`` constructor to
+convert values one at a time
+
+.. jupyter-execute::
+
+    pd.Series({"x": arr[0].item(), "mean": float(arr.mean())})
+
+
+.. _approach to metadata:
+
+What is your approach to metadata?
+----------------------------------
+
+We are firm believers in the power of labeled data! In addition to dimensions
+and coordinates, xarray supports arbitrary metadata in the form of global
+(Dataset) and variable specific (DataArray) attributes (``attrs``).
+
+Automatic interpretation of labels is powerful but also reduces flexibility.
+With xarray, we draw a firm line between labels that the library understands
+(``dims`` and ``coords``) and labels for users and user code (``attrs``). For
+example, we do not automatically interpret and enforce units or `CF
+conventions`_. (An exception is serialization to and from netCDF files.)
+
+.. _CF conventions: https://cfconventions.org/latest.html
+
+An implication of this choice is that we do not propagate ``attrs`` through
+most operations unless explicitly flagged (some methods have a ``keep_attrs``
+option, and there is a global flag, accessible with :py:func:`xarray.set_options`,
+for setting this to be always True or False). Similarly, xarray does not check
+for conflicts between ``attrs`` when combining arrays and datasets, unless
+explicitly requested with the option ``compat='identical'``. The guiding
+principle is that metadata should not be allowed to get in the way.
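+
+As a minimal sketch of controlling attribute propagation, either per call or globally:
+
+.. code:: python
+
+    import numpy as np
+    import xarray as xr
+
+    da = xr.DataArray(np.arange(3.0), dims="x", attrs={"units": "m"})
+
+    # keep attrs for a single reduction
+    da.mean(keep_attrs=True)
+
+    # or change the default for a block of code
+    with xr.set_options(keep_attrs=True):
+        result = da + 1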
+
+In general, xarray relies on the capabilities of the backends for reading and writing
+attributes, which has some implications for roundtripping. One example of such an inconsistency is that size-1 lists roundtrip as a single element (for netCDF4 backends).
+
+What other netCDF related Python libraries should I know about?
+---------------------------------------------------------------
+
+`netCDF4-python`__ provides a lower level interface for working with
+netCDF and OpenDAP datasets in Python. We use netCDF4-python internally in
+xarray, and have contributed a number of improvements and fixes upstream. Xarray
+does not yet support all of netCDF4-python's features, such as modifying files
+on-disk.
+
+__ https://unidata.github.io/netcdf4-python/
+
+Iris_ (supported by the UK Met Office) provides similar tools for
+in-memory manipulation of labeled arrays, aimed specifically at weather and
+climate data needs. Indeed, the Iris :py:class:`~iris.cube.Cube` was direct
+inspiration for xarray's :py:class:`~xarray.DataArray`. Xarray and Iris take very
+different approaches to handling metadata: Iris strictly interprets
+`CF conventions`_. Iris particularly shines at mapping, thanks to its
+integration with Cartopy_.
+
+.. _Iris: https://scitools-iris.readthedocs.io/en/stable/
+.. _Cartopy: https://cartopy.readthedocs.io/stable/
+
+We think the design decisions we have made for xarray (namely, basing it on
+pandas) make it a faster and more flexible data analysis tool. That said, Iris
+has some great domain specific functionality, and there are dedicated methods for
+converting back and forth between xarray and Iris. See
+:ref:`Reading and Writing Iris data <io.iris>` for more details.
+
+What other projects leverage xarray?
+------------------------------------
+
+See section :ref:`ecosystem`.
+
+How do I open format X file as an xarray dataset?
+-------------------------------------------------
+
+To open a file in format X with xarray, you need to know the `format of the data <https://docs.xarray.dev/en/stable/user-guide/io.html#csv-and-other-formats-supported-by-pandas/>`_ you want to read. If the format is supported, you can use the appropriate function provided by xarray. The following table lists the functions used for different file formats in xarray, as well as links to other packages that can be used:
+
+.. csv-table::
+   :header: "File Format", "Open via", " Related Packages"
+   :widths: 15, 45, 15
+
+   "NetCDF (.nc, .nc4, .cdf)","``open_dataset()`` OR ``open_mfdataset()``", "`netCDF4 <https://pypi.org/project/netCDF4/>`_, `cdms2 <https://cdms.readthedocs.io/en/latest/cdms2.html>`_"
+   "HDF5 (.h5, .hdf5)","``open_dataset()`` OR ``open_mfdataset()``", "`h5py <https://www.h5py.org/>`_, `pytables <https://www.pytables.org/>`_ "
+   "GRIB (.grb, .grib)", "``open_dataset()``", "`cfgrib <https://pypi.org/project/cfgrib/>`_, `pygrib <https://pypi.org/project/pygrib/>`_"
+   "CSV (.csv)","``open_dataset()``", "`pandas`_ , `dask <https://www.dask.org/>`_"
+   "Zarr (.zarr)","``open_dataset()`` OR ``open_mfdataset()``", "`zarr <https://pypi.org/project/zarr/>`_ , `dask <https://www.dask.org/>`_ "
+
+.. _pandas: https://pandas.pydata.org
+
+If you are unable to open a file in xarray:
+
+- Check that you have all necessary dependencies installed, including any optional dependencies (such as scipy, h5netcdf, or cfgrib, as mentioned below) that may be required for your specific use case.
+
+- If all necessary dependencies are installed but the file still cannot be opened, check whether a specialized backend is available for the file format you are working with. Consult the xarray documentation or the documentation for the file format to determine whether a specialized backend is required and, if so, how to install and use it with xarray.
+
+- If the file format is not supported by xarray or any of its available backends, you may need to use a different library or tool to work with the file. Consult the documentation for the file format to determine which tools are recommended for working with it.
+
+Xarray chooses a default engine for reading a file, usually based on the file extension or type. If you don't specify the engine, xarray will try to guess it, and may fall back to a different engine if it cannot determine the correct one.
+
+It is therefore good practice to specify the engine explicitly, especially when working with complex data formats or non-standard file extensions, to ensure that the correct backend is used.
+
+:py:func:`xarray.backends.list_engines` returns a dictionary mapping the names of the available engines to their ``BackendEntrypoint`` objects.
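+
+For example, you can inspect which engines are available in your environment (the output will vary with the optional backends installed):
+
+.. code:: python
+
+    import xarray as xr
+
+    engines = xr.backends.list_engines()
+    print(sorted(engines))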
+
+You can use the ``engine`` argument to specify the backend when calling ``open_dataset()`` or other reading functions in xarray, as shown below:
+
+NetCDF
+~~~~~~
+If you are reading a netCDF file with a ``.nc`` extension, the default engine is ``netcdf4``. However, if you have files with non-standard extensions or an ambiguous file format, specify the engine explicitly to ensure that the correct backend is used.
+
+Use :py:func:`~xarray.open_dataset` to open a NetCDF file and return an xarray Dataset object.
+
+.. code:: python
+
+    import xarray as xr
+
+    # use xarray to open the file and return an xarray.Dataset object using netcdf4 engine
+
+    ds = xr.open_dataset("/path/to/my/file.nc", engine="netcdf4")
+
+    # Print Dataset object
+
+    print(ds)
+
+    # use xarray to open the file and return an xarray.Dataset object using scipy engine
+
+    ds = xr.open_dataset("/path/to/my/file.nc", engine="scipy")
+
+We recommend installing ``scipy`` via conda:
+
+::
+
+    conda install scipy
+
+HDF5
+~~~~
+Use :py:func:`~xarray.open_dataset` to open an HDF5 file and return an xarray Dataset object.
+
+You should specify the ``engine`` keyword argument when reading HDF5 files with xarray, as there are multiple backends that can be used to read HDF5 files, and xarray may not always be able to automatically detect the correct one based on the file extension or file format.
+
+To read HDF5 files with xarray, you can use :py:func:`~xarray.open_dataset` with the ``h5netcdf`` engine, as follows:
+
+.. code:: python
+
+    import xarray as xr
+
+    # Open HDF5 file as an xarray Dataset
+
+    ds = xr.open_dataset("path/to/hdf5/file.hdf5", engine="h5netcdf")
+
+    # Print Dataset object
+
+    print(ds)
+
+We recommend installing the ``h5netcdf`` library via conda:
+
+::
+
+    conda install -c conda-forge h5netcdf
+
+If you want to use the ``netCDF4`` backend to read a file with a ``.h5`` extension (which is typically associated with the HDF5 file format), you can specify the engine argument as follows:
+
+.. code:: python
+
+    ds = xr.open_dataset("path/to/file.h5", engine="netcdf4")
+
+GRIB
+~~~~
+You should specify the ``engine`` keyword argument when reading GRIB files with xarray, as there are multiple backends that can be used to read GRIB files, and xarray may not always be able to automatically detect the correct one based on the file extension or file format.
+
+Use :py:func:`~xarray.open_dataset` with the ``cfgrib`` engine to open a GRIB file as an xarray Dataset.
+
+.. code:: python
+
+    import xarray as xr
+
+    # define the path to your GRIB file and the engine you want to use to open the file
+    # use ``open_dataset()`` to open the file with the specified engine and return an xarray.Dataset object
+
+    ds = xr.open_dataset("path/to/your/file.grib", engine="cfgrib")
+
+    # Print Dataset object
+
+    print(ds)
+
+We recommend installing ``cfgrib`` via conda:
+
+::
+
+    conda install -c conda-forge cfgrib
+
+CSV
+~~~
+By default, xarray relies on the ``pandas`` library to read CSV files. In general, you don't need to specify the ``engine`` keyword argument when reading CSV files, as the default ``pandas`` parser is sufficient for most use cases. If you are working with very large CSV files, or if you need processing that the default parser does not support, you may want to use a different backend.
+In such cases, you can specify the ``engine`` argument when reading the CSV file.
+
+To read CSV files for use with xarray, load the file with ``pandas`` and convert the resulting DataFrame to an xarray Dataset, as follows:
+
+.. code:: python
+
+    import xarray as xr
+    import pandas as pd
+
+    # Load CSV file into pandas DataFrame using the "c" engine
+
+    df = pd.read_csv("your_file.csv", engine="c")
+
+    # Convert the pandas DataFrame to an xarray.Dataset
+
+    ds = xr.Dataset.from_dataframe(df)
+
+    # Print the resulting xarray Dataset
+
+    print(ds)
+
+Zarr
+~~~~
+When opening a Zarr dataset with xarray, the ``engine`` is automatically detected based on the file extension or the type of input provided. If the dataset is stored in a directory with a ".zarr" extension, xarray will automatically use the "zarr" engine.
+
+To read Zarr data with xarray, use :py:func:`~xarray.open_dataset` and specify the path to the Zarr store as follows:
+
+.. code:: python
+
+    import xarray as xr
+
+    # use xarray to open the file and return an xarray.Dataset object using zarr engine
+
+    ds = xr.open_dataset("path/to/your/file.zarr", engine="zarr")
+
+    # Print Dataset object
+
+    print(ds)
+
+We recommend installing ``zarr`` via conda:
+
+::
+
+    conda install -c conda-forge zarr
+
+There may be situations where you need to specify the engine manually using the ``engine`` keyword argument. For example, if you have a Zarr dataset stored in a file with a different extension (e.g., ".npy"), you will need to specify the engine as "zarr" explicitly when opening the dataset.
+
+Some packages may have additional functionality beyond what is shown here. You can refer to the documentation for each package for more information.
+
+How does xarray handle missing values?
+--------------------------------------
+
+**Xarray can handle missing values using** ``np.nan``.
+
+- ``np.nan``, NumPy's constant for "Not a Number", is used to represent missing values in labeled arrays and datasets. It is a commonly used standard for representing missing or undefined numerical data in scientific computing.
+
+- Most of xarray's computation methods are designed to automatically handle missing values appropriately.
+
+  For example, when performing operations like addition or multiplication on arrays that contain missing values, xarray will automatically ignore the missing values and only perform the operation on the valid data. This makes it easy to work with data that may contain missing or undefined values without having to worry about handling them explicitly.
+
+- Many of xarray's `aggregation methods <https://docs.xarray.dev/en/stable/user-guide/computation.html#aggregation>`_, such as ``sum()``, ``mean()``, ``min()``, ``max()``, and others, have a ``skipna`` argument that controls whether missing values (represented by NaN) should be skipped (``True``) or propagated into the result (``False``) when performing the calculation.
+
+  By default, ``skipna`` is set to ``True``, so missing values are ignored when computing the result. However, you can set ``skipna`` to ``False`` if you want missing values to propagate into the calculation, as illustrated in the sketch after this list.
+
+- When `plotting <https://docs.xarray.dev/en/stable/user-guide/plotting.html#missing-values>`_ an xarray dataset or array that contains missing values, xarray simply leaves them as blank spaces in the plot.
+
+- We provide a set of `methods <https://docs.xarray.dev/en/stable/user-guide/computation.html#missing-values>`_ for manipulating and filling missing values.
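+
+As a minimal sketch (with made-up data), here is how ``skipna`` and the filling methods behave:
+
+.. code:: python
+
+    import numpy as np
+    import xarray as xr
+
+    da = xr.DataArray([1.0, np.nan, 3.0], dims="x")
+
+    da.mean()              # skipna defaults to True, so this gives 2.0
+    da.mean(skipna=False)  # the NaN propagates, so this gives nan
+    da.fillna(0.0)         # replace missing values explicitly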
+
+How should I cite xarray?
+-------------------------
+
+If you are using xarray and would like to cite it in an academic publication, we
+would certainly appreciate it. We recommend two citations.
+
+  1. At a minimum, we recommend citing the xarray overview journal article,
+     published in the Journal of Open Research Software.
+
+     - Hoyer, S. & Hamman, J., (2017). xarray: N-D labeled Arrays and
+       Datasets in Python. Journal of Open Research Software. 5(1), p.10.
+       DOI: https://doi.org/10.5334/jors.148
+
+       Here’s an example of a BibTeX entry::
+
+           @article{hoyer2017xarray,
+             title     = {xarray: {N-D} labeled arrays and datasets in {Python}},
+             author    = {Hoyer, S. and J. Hamman},
+             journal   = {Journal of Open Research Software},
+             volume    = {5},
+             number    = {1},
+             year      = {2017},
+             publisher = {Ubiquity Press},
+             doi       = {10.5334/jors.148},
+             url       = {https://doi.org/10.5334/jors.148}
+           }
+
+  2. You may also want to cite a specific version of the xarray package. We
+     provide a `Zenodo citation and DOI <https://doi.org/10.5281/zenodo.598201>`_
+     for this purpose:
+
+        .. image:: https://zenodo.org/badge/doi/10.5281/zenodo.598201.svg
+           :target: https://doi.org/10.5281/zenodo.598201
+
+       An example BibTeX entry::
+
+           @misc{xarray_v0_8_0,
+                 author = {Stephan Hoyer and Clark Fitzgerald and Joe Hamman and others},
+                 title  = {xarray: v0.8.0},
+                 month  = aug,
+                 year   = 2016,
+                 doi    = {10.5281/zenodo.59499},
+                 url    = {https://doi.org/10.5281/zenodo.59499}
+                }
+
+.. _api-stability:
+
+How stable is Xarray's API?
+---------------------------
+
+Xarray tries very hard to maintain backwards compatibility in our :ref:`api` between released versions.
+Whilst we do occasionally make breaking changes in order to improve the library,
+we `signpost changes <https://docs.xarray.dev/en/stable/contributing.html#backwards-compatibility>`_ with ``DeprecationWarnings`` for many releases in advance.
+(An exception is bugs, whose behaviour we try to fix as soon as we notice them.)
+Our `test-driven development practices <https://docs.xarray.dev/en/stable/contributing.html#test-driven-development-code-writing>`_ help to ensure that any accidental regressions are caught.
+This philosophy applies to everything in the `public API <https://docs.xarray.dev/en/stable/getting-started-guide/faq.html#what-parts-of-xarray-are-considered-public-api>`_.
+
+.. _public-api:
+
+What parts of xarray are considered public API?
+-----------------------------------------------
+
+As a rule, only functions/methods documented in our :ref:`api` are considered
+part of xarray's public API. Everything else (in particular, everything in
+``xarray.core`` that is not also exposed in the top level ``xarray`` namespace)
+is considered a private implementation detail that may change at any time.
+
+Objects that exist to facilitate xarray's fluent interface on ``DataArray`` and
+``Dataset`` objects are a special case. For convenience, we document them in
+the API docs, but only their methods and the ``DataArray``/``Dataset``
+methods/properties to construct them (e.g., ``.plot()``, ``.groupby()``,
+``.str``) are considered public API. Constructors and other details of the
+internal classes used to implement them (i.e.,
+``xarray.plot.plotting._PlotMethods``, ``xarray.core.groupby.DataArrayGroupBy``,
+``xarray.core.accessor_str.StringAccessor``) are not.
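+
+As a small illustration of this boundary (with made-up data): reaching an accessor through its documented property is public API, while constructing the underlying class yourself is not.
+
+.. code:: python
+
+    import xarray as xr
+
+    da = xr.DataArray(["a", "b"], dims="x")
+
+    # public: the string accessor is reached through the documented ``.str`` property
+    da.str.upper()
+
+    # private: instantiating xarray.core.accessor_str.StringAccessor directly
+    # relies on internals that may change at any time, so avoid doing so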
diff -pruN 2025.03.1-8/doc/get-help/help-diagram.rst 2025.10.1-1/doc/get-help/help-diagram.rst
--- 2025.03.1-8/doc/get-help/help-diagram.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/get-help/help-diagram.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,104 @@
+Getting Help
+============
+
+Navigating the wealth of resources available for Xarray can be overwhelming.
+We've created this flow chart to help guide you towards the best way to get help, depending on what you're working towards.
+
+Also be sure to check out our :ref:`faq` and :ref:`howdoi` pages for solutions to common questions.
+
+A major strength of Xarray is its user community. Sometimes you might not yet have a concrete question but would simply like to connect with other Xarray users. We have accounts on several social platforms for exactly that: see :ref:`socials`.
+
+We look forward to hearing from you!
+
+Help Flowchart
+--------------
+..
+   _comment: mermaid Flowchart "link" text gets secondary color background, SVG icon fill gets primary color
+
+.. raw:: html
+
+    <style>
+      /* Ensure PST link colors don't override mermaid text colors */
+      .mermaid a {
+        color: white;
+      }
+      .mermaid a:hover {
+        color: magenta;
+        text-decoration-color: magenta;
+      }
+      .mermaid a:visited {
+        color: white;
+        text-decoration-color: white;
+      }
+    </style>
+
+.. mermaid::
+    :config: {"theme":"base","themeVariables":{"fontSize":"20px","primaryColor":"#fff","primaryTextColor":"#fff","primaryBorderColor":"#59c7d6","lineColor":"#e28126","secondaryColor":"#767985"}}
+    :alt: Flowchart illustrating the different ways to access help using or contributing to Xarray.
+
+    flowchart TD
+        intro[Welcome to Xarray! How can we help?]:::quesNodefmt
+        usage([fa:fa-chalkboard-user <a href="https://tutorial.xarray.dev">Xarray Tutorial</a>
+            fab:fa-readme <a href="https://docs.xarray.dev">Xarray Docs</a>
+            fab:fa-stack-overflow <a href="https://stackoverflow.com/questions/tagged/python-xarray">Stack Exchange</a>
+            fab:fa-google <a href="https://www.google.com">Ask Google</a>
+            fa:fa-robot Ask AI ChatBot]):::ansNodefmt
+        extensions([Extension docs:
+            fab:fa-readme <a href="https://docs.dask.org">Dask</a>
+            fab:fa-readme <a href="https://corteva.github.io/rioxarray">Rioxarray</a>]):::ansNodefmt
+        help([fab:fa-github <a href="https://github.com/pydata/xarray/discussions">Xarray Discussions</a>
+            fab:fa-discord <a href="https://discord.com/invite/wEKPCt4PDu">Xarray Discord</a>
+            fa:fa-globe <a href="https://discourse.pangeo.io">Pangeo Discourse</a>]):::ansNodefmt
+        bug([Let us know:
+            fab:fa-github <a href="https://github.com/pydata/xarray/issues">Xarray Issues</a>]):::ansNodefmt
+        contrib([fa:fa-book-open <a href="https://docs.xarray.dev/en/latest/contribute">Xarray Contributor's Guide</a>]):::ansNodefmt
+        pr([fab:fa-github <a href="https://github.com/pydata/xarray/pulls">Pull Request</a>]):::ansNodefmt
+        dev([fab:fa-github Add PR Comment
+            fa:fa-users <a href="https://docs.xarray.dev/en/stable/contribute/developers-meeting.html">Attend Developer's Meeting</a> ]):::ansNodefmt
+        report[Thanks for letting us know!]:::quesNodefmt
+        merged[fa:fa-hands-clapping Thanks for contributing to Xarray!]:::quesNodefmt
+
+
+        intro -->|How do I use Xarray?| usage
+        usage -->|"With extensions (like Dask, Rioxarray, etc.)"| extensions
+
+        usage -->|I still have questions or could use some guidance | help
+        intro -->|I think I found a bug| bug
+        bug
+        contrib
+        bug -->|I just wanted to tell you| report
+        bug<-->|I'd like to fix the bug!| contrib
+        pr -->|my PR was approved| merged
+
+
+        intro -->|I wish Xarray could...| bug
+
+
+        pr <-->|my PR is quiet| dev
+        contrib -->pr
+
+        classDef quesNodefmt font-size:20pt,fill:#0e4666,stroke:#59c7d6,stroke-width:3
+        classDef ansNodefmt font-size:18pt,fill:#4a4a4a,stroke:#17afb4,stroke-width:3
+        linkStyle default font-size:16pt,stroke-width:4
+
+
+Flowchart links
+---------------
+- `Xarray Tutorials <https://tutorial.xarray.dev/>`__
+- `Xarray Docs <https://docs.xarray.dev>`__
+- `Stack Exchange <https://stackoverflow.com/questions/tagged/python-xarray>`__
+- `Xarray Discussions <https://github.com/pydata/xarray/discussions>`__
+- `Xarray Discord <https://discord.com/invite/wEKPCt4PDu>`__
+- `Xarray Office Hours <https://github.com/pydata/xarray/discussions/categories/office-hours>`__
+- `Pangeo Discourse <https://discourse.pangeo.io/>`__
+- `Xarray Issues <https://github.com/pydata/xarray/issues>`__
+- :ref:`contributing`
+- :ref:`developers-meeting`
+
+.. toctree::
+   :maxdepth: 1
+   :hidden:
+
+   faq
+   howdoi
+   socials
diff -pruN 2025.03.1-8/doc/get-help/howdoi.rst 2025.10.1-1/doc/get-help/howdoi.rst
--- 2025.03.1-8/doc/get-help/howdoi.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/get-help/howdoi.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,77 @@
+.. currentmodule:: xarray
+
+.. _howdoi:
+
+How do I ...
+============
+
+.. list-table::
+   :header-rows: 1
+   :widths: 40 60
+
+   * - How do I...
+     - Solution
+   * - add a DataArray to my dataset as a new variable
+     - ``my_dataset[varname] = my_dataArray`` or :py:meth:`Dataset.assign` (see also :ref:`dictionary_like_methods`)
+   * - add variables from other datasets to my dataset
+     - :py:meth:`Dataset.merge`
+   * - add a new dimension and/or coordinate
+     - :py:meth:`DataArray.expand_dims`, :py:meth:`Dataset.expand_dims`
+   * - add a new coordinate variable
+     - :py:meth:`DataArray.assign_coords`
+   * - change a data variable to a coordinate variable
+     - :py:meth:`Dataset.set_coords`
+   * - change the order of dimensions
+     - :py:meth:`DataArray.transpose`, :py:meth:`Dataset.transpose`
+   * - reshape dimensions
+     - :py:meth:`DataArray.stack`, :py:meth:`Dataset.stack`, :py:meth:`Dataset.coarsen.construct`, :py:meth:`DataArray.coarsen.construct`
+   * - remove a variable from my object
+     - :py:meth:`Dataset.drop_vars`, :py:meth:`DataArray.drop_vars`
+   * - remove dimensions of length 1 or 0
+     - :py:meth:`DataArray.squeeze`, :py:meth:`Dataset.squeeze`
+   * - remove all variables with a particular dimension
+     - :py:meth:`Dataset.drop_dims`
+   * - convert non-dimension coordinates to data variables or remove them
+     - :py:meth:`DataArray.reset_coords`, :py:meth:`Dataset.reset_coords`
+   * - rename a variable, dimension or coordinate
+     - :py:meth:`Dataset.rename`, :py:meth:`DataArray.rename`, :py:meth:`Dataset.rename_vars`, :py:meth:`Dataset.rename_dims`
+   * - convert a DataArray to Dataset or vice versa
+     - :py:meth:`DataArray.to_dataset`, :py:meth:`Dataset.to_dataarray`, :py:meth:`Dataset.to_stacked_array`, :py:meth:`DataArray.to_unstacked_dataset`
+   * - extract variables that have certain attributes
+     - :py:meth:`Dataset.filter_by_attrs`
+   * - extract the underlying array (e.g. NumPy or Dask arrays)
+     - :py:attr:`DataArray.data`
+   * - convert to and extract the underlying NumPy array
+     - :py:attr:`DataArray.to_numpy`
+   * - convert to a pandas DataFrame
+     - :py:attr:`Dataset.to_dataframe`
+   * - sort values
+     - :py:attr:`Dataset.sortby`
+   * - find out if my xarray object is wrapping a Dask Array
+     - :py:func:`dask.is_dask_collection`
+   * - know how much memory my object requires
+     - :py:attr:`DataArray.nbytes`, :py:attr:`Dataset.nbytes`
+   * - Get axis number for a dimension
+     - :py:meth:`DataArray.get_axis_num`
+   * - convert a possibly irregularly sampled timeseries to a regularly sampled timeseries
+     - :py:meth:`DataArray.resample`, :py:meth:`Dataset.resample` (see :ref:`resampling` for more)
+   * - apply a function on all data variables in a Dataset
+     - :py:meth:`Dataset.map`
+   * - write xarray objects with complex values to a netCDF file
+     - :py:func:`Dataset.to_netcdf`, :py:func:`DataArray.to_netcdf` specifying ``engine="h5netcdf"`` or :py:func:`Dataset.to_netcdf`, :py:func:`DataArray.to_netcdf` specifying ``engine="netCDF4", auto_complex=True``
+   * - make xarray objects look like other xarray objects
+     - :py:func:`~xarray.ones_like`, :py:func:`~xarray.zeros_like`, :py:func:`~xarray.full_like`, :py:meth:`Dataset.reindex_like`, :py:meth:`Dataset.interp_like`, :py:meth:`Dataset.broadcast_like`, :py:meth:`DataArray.reindex_like`, :py:meth:`DataArray.interp_like`, :py:meth:`DataArray.broadcast_like`
+   * - Make sure my datasets have values at the same coordinate locations
+     - ``xr.align(dataset_1, dataset_2, join="exact")``
+   * - replace NaNs with other values
+     - :py:meth:`Dataset.fillna`, :py:meth:`Dataset.ffill`, :py:meth:`Dataset.bfill`, :py:meth:`Dataset.interpolate_na`, :py:meth:`DataArray.fillna`, :py:meth:`DataArray.ffill`, :py:meth:`DataArray.bfill`, :py:meth:`DataArray.interpolate_na`
+   * - extract the year, month, day or similar from a DataArray of time values
+     - ``obj.dt.month`` for example where ``obj`` is a :py:class:`~xarray.DataArray` containing ``datetime64`` or ``cftime`` values. See :ref:`dt_accessor` for more.
+   * - round off time values to a specified frequency
+     - ``obj.dt.ceil``, ``obj.dt.floor``, ``obj.dt.round``. See :ref:`dt_accessor` for more.
+   * - make a mask that is ``True`` where an object contains any of the values in an array
+     - :py:meth:`Dataset.isin`, :py:meth:`DataArray.isin`
+   * - Index using a boolean mask
+     - :py:meth:`Dataset.query`, :py:meth:`DataArray.query`, :py:meth:`Dataset.where`, :py:meth:`DataArray.where`
+   * - preserve ``attrs`` during (most) xarray operations
+     - ``xr.set_options(keep_attrs=True)``
diff -pruN 2025.03.1-8/doc/get-help/socials.rst 2025.10.1-1/doc/get-help/socials.rst
--- 2025.03.1-8/doc/get-help/socials.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/get-help/socials.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,10 @@
+.. _socials:
+
+Social Media
+============
+
+Xarray is active on several social media platforms. We use these platforms to share updates and connect with the user community.
+
+- `Discord <https://discord.com/invite/wEKPCt4PDu>`__
+- `Bluesky <https://bsky.app/profile/xarray.bsky.social>`__
+- `Twitter (X) <https://x.com/xarray_dev>`__
diff -pruN 2025.03.1-8/doc/getting-started-guide/faq.rst 2025.10.1-1/doc/getting-started-guide/faq.rst
--- 2025.03.1-8/doc/getting-started-guide/faq.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/getting-started-guide/faq.rst	1970-01-01 00:00:00.000000000 +0000
@@ -1,448 +0,0 @@
-.. _faq:
-
-Frequently Asked Questions
-==========================
-
-.. ipython:: python
-    :suppress:
-
-    import numpy as np
-    import pandas as pd
-    import xarray as xr
-
-    np.random.seed(123456)
-
-
-Your documentation keeps mentioning pandas. What is pandas?
------------------------------------------------------------
-
-pandas_ is a very popular data analysis package in Python
-with wide usage in many fields. Our API is heavily inspired by pandas —
-this is why there are so many references to pandas.
-
-.. _pandas: https://pandas.pydata.org
-
-
-Do I need to know pandas to use xarray?
----------------------------------------
-
-No! Our API is heavily inspired by pandas so while knowing pandas will let you
-become productive more quickly, knowledge of pandas is not necessary to use xarray.
-
-
-Should I use xarray instead of pandas?
---------------------------------------
-
-It's not an either/or choice! xarray provides robust support for converting
-back and forth between the tabular data-structures of pandas and its own
-multi-dimensional data-structures.
-
-That said, you should only bother with xarray if some aspect of data is
-fundamentally multi-dimensional. If your data is unstructured or
-one-dimensional, pandas is usually the right choice: it has better performance
-for common operations such as ``groupby`` and you'll find far more usage
-examples online.
-
-
-Why is pandas not enough?
--------------------------
-
-pandas is a fantastic library for analysis of low-dimensional labelled data -
-if it can be sensibly described as "rows and columns", pandas is probably the
-right choice.  However, sometimes we want to use higher dimensional arrays
-(`ndim > 2`), or arrays for which the order of dimensions (e.g., columns vs
-rows) shouldn't really matter. For example, the images of a movie can be
-natively represented as an array with four dimensions: time, row, column and
-color.
-
-pandas has historically supported N-dimensional panels, but deprecated them in
-version 0.20 in favor of xarray data structures. There are now built-in methods
-on both sides to convert between pandas and xarray, allowing for more focused
-development effort. Xarray objects have a much richer model of dimensionality -
-if you were using Panels:
-
-- You need to create a new factory type for each dimensionality.
-- You can't do math between NDPanels with different dimensionality.
-- Each dimension in a NDPanel has a name (e.g., 'labels', 'items',
-  'major_axis', etc.) but the dimension names refer to order, not their
-  meaning. You can't specify an operation as to be applied along the "time"
-  axis.
-- You often have to manually convert collections of pandas arrays
-  (Series, DataFrames, etc) to have the same number of dimensions.
-  In contrast, this sort of data structure fits very naturally in an
-  xarray ``Dataset``.
-
-You can :ref:`read about switching from Panels to xarray here <panel transition>`.
-pandas gets a lot of things right, but many science, engineering and complex
-analytics use cases need fully multi-dimensional data structures.
-
-How do xarray data structures differ from those found in pandas?
-----------------------------------------------------------------
-
-The main distinguishing feature of xarray's ``DataArray`` over labeled arrays in
-pandas is that dimensions can have names (e.g., "time", "latitude",
-"longitude"). Names are much easier to keep track of than axis numbers, and
-xarray uses dimension names for indexing, aggregation and broadcasting. Not only
-can you write ``x.sel(time='2000-01-01')`` and  ``x.mean(dim='time')``, but
-operations like ``x - x.mean(dim='time')`` always work, no matter the order
-of the "time" dimension. You never need to reshape arrays (e.g., with
-``np.newaxis``) to align them for arithmetic operations in xarray.
-
-
-Why don't aggregations return Python scalars?
----------------------------------------------
-
-Xarray tries hard to be self-consistent: operations on a ``DataArray`` (resp.
-``Dataset``) return another ``DataArray`` (resp. ``Dataset``) object. In
-particular, operations returning scalar values (e.g. indexing or aggregations
-like ``mean`` or ``sum`` applied to all axes) will also return xarray objects.
-
-Unfortunately, this means we sometimes have to explicitly cast our results from
-xarray when using them in other libraries. As an illustration, the following
-code fragment
-
-.. ipython:: python
-
-    arr = xr.DataArray([1, 2, 3])
-    pd.Series({"x": arr[0], "mean": arr.mean(), "std": arr.std()})
-
-does not yield the pandas DataFrame we expected. We need to specify the type
-conversion ourselves:
-
-.. ipython:: python
-
-    pd.Series({"x": arr[0], "mean": arr.mean(), "std": arr.std()}, dtype=float)
-
-Alternatively, we could use the ``item`` method or the ``float`` constructor to
-convert values one at a time
-
-.. ipython:: python
-
-    pd.Series({"x": arr[0].item(), "mean": float(arr.mean())})
-
-
-.. _approach to metadata:
-
-What is your approach to metadata?
-----------------------------------
-
-We are firm believers in the power of labeled data! In addition to dimensions
-and coordinates, xarray supports arbitrary metadata in the form of global
-(Dataset) and variable specific (DataArray) attributes (``attrs``).
-
-Automatic interpretation of labels is powerful but also reduces flexibility.
-With xarray, we draw a firm line between labels that the library understands
-(``dims`` and ``coords``) and labels for users and user code (``attrs``). For
-example, we do not automatically interpret and enforce units or `CF
-conventions`_. (An exception is serialization to and from netCDF files.)
-
-.. _CF conventions: https://cfconventions.org/latest.html
-
-An implication of this choice is that we do not propagate ``attrs`` through
-most operations unless explicitly flagged (some methods have a ``keep_attrs``
-option, and there is a global flag, accessible with :py:func:`xarray.set_options`,
-for setting this to be always True or False). Similarly, xarray does not check
-for conflicts between ``attrs`` when combining arrays and datasets, unless
-explicitly requested with the option ``compat='identical'``. The guiding
-principle is that metadata should not be allowed to get in the way.
-
-In general xarray uses the capabilities of the backends for reading and writing
-attributes. That has some implications on roundtripping. One example for such inconsistency is that size-1 lists will roundtrip as single element (for netcdf4 backends).
-
-What other netCDF related Python libraries should I know about?
----------------------------------------------------------------
-
-`netCDF4-python`__ provides a lower level interface for working with
-netCDF and OpenDAP datasets in Python. We use netCDF4-python internally in
-xarray, and have contributed a number of improvements and fixes upstream. Xarray
-does not yet support all of netCDF4-python's features, such as modifying files
-on-disk.
-
-__ https://unidata.github.io/netcdf4-python/
-
-Iris_ (supported by the UK Met office) provides similar tools for in-
-memory manipulation of labeled arrays, aimed specifically at weather and
-climate data needs. Indeed, the Iris :py:class:`~iris.cube.Cube` was direct
-inspiration for xarray's :py:class:`~xarray.DataArray`. Xarray and Iris take very
-different approaches to handling metadata: Iris strictly interprets
-`CF conventions`_. Iris particularly shines at mapping, thanks to its
-integration with Cartopy_.
-
-.. _Iris: https://scitools-iris.readthedocs.io/en/stable/
-.. _Cartopy: https://scitools.org.uk/cartopy/docs/latest/
-
-We think the design decisions we have made for xarray (namely, basing it on
-pandas) make it a faster and more flexible data analysis tool. That said, Iris
-has some great domain specific functionality, and there are dedicated methods for
-converting back and forth between xarray and Iris. See
-:ref:`Reading and Writing Iris data <io.iris>` for more details.
-
-What other projects leverage xarray?
-------------------------------------
-
-See section :ref:`ecosystem`.
-
-How do I open format X file as an xarray dataset?
--------------------------------------------------
-
-To open format X file in xarray, you need to know the `format of the data <https://docs.xarray.dev/en/stable/user-guide/io.html#csv-and-other-formats-supported-by-pandas/>`_ you want to read. If the format is supported, you can use the appropriate function provided by xarray. The following table provides functions used for different file formats in xarray, as well as links to other packages that can be used:
-
-.. csv-table::
-   :header: "File Format", "Open via", " Related Packages"
-   :widths: 15, 45, 15
-
-   "NetCDF (.nc, .nc4, .cdf)","``open_dataset()`` OR ``open_mfdataset()``", "`netCDF4 <https://pypi.org/project/netCDF4/>`_, `netcdf <https://pypi.org/project/netcdf/>`_ , `cdms2 <https://cdms.readthedocs.io/en/latest/cdms2.html>`_"
-   "HDF5 (.h5, .hdf5)","``open_dataset()`` OR ``open_mfdataset()``", "`h5py <https://www.h5py.org/>`_, `pytables <https://www.pytables.org/>`_ "
-   "GRIB (.grb, .grib)", "``open_dataset()``", "`cfgrib <https://pypi.org/project/cfgrib/>`_, `pygrib <https://pypi.org/project/pygrib/>`_"
-   "CSV (.csv)","``open_dataset()``", "`pandas`_ , `dask <https://www.dask.org/>`_"
-   "Zarr (.zarr)","``open_dataset()`` OR ``open_mfdataset()``", "`zarr <https://pypi.org/project/zarr/>`_ , `dask <https://www.dask.org/>`_ "
-
-.. _pandas: https://pandas.pydata.org
-
-If you are unable to open a file in xarray:
-
-- You should check that you are having all necessary dependencies installed, including any optional dependencies (like scipy, h5netcdf, cfgrib etc as mentioned below) that may be required for the specific use case.
-
-- If all necessary dependencies are installed but the file still cannot be opened, you must check if there are any specialized backends available for the specific file format you are working with. You can consult the xarray documentation or the documentation for the file format to determine if a specialized backend is required, and if so, how to install and use it with xarray.
-
-- If the file format is not supported by xarray or any of its available backends, the user may need to use a different library or tool to work with the file. You can consult the documentation for the file format to determine which tools are recommended for working with it.
-
-Xarray provides a default engine to read files, which is usually determined by the file extension or type. If you don't specify the engine, xarray will try to guess it based on the file extension or type, and may fall back to a different engine if it cannot determine the correct one.
-
-Therefore, it's good practice to always specify the engine explicitly, to ensure that the correct backend is used and especially when working with complex data formats or non-standard file extensions.
-
-:py:func:`xarray.backends.list_engines` is a function in xarray that returns a dictionary of available engines and their BackendEntrypoint objects.
-
-You can use the ``engine`` argument to specify the backend when calling ``open_dataset()`` or other reading functions in xarray, as shown below:
-
-NetCDF
-~~~~~~
-If you are reading a netCDF file with a ".nc" extension, the default engine is ``netcdf4``. However if you have files with non-standard extensions or if the file format is ambiguous. Specify the engine explicitly, to ensure that the correct backend is used.
-
-Use :py:func:`~xarray.open_dataset` to open a NetCDF file and return an xarray Dataset object.
-
-.. code:: python
-
-    import xarray as xr
-
-    # use xarray to open the file and return an xarray.Dataset object using netcdf4 engine
-
-    ds = xr.open_dataset("/path/to/my/file.nc", engine="netcdf4")
-
-    # Print Dataset object
-
-    print(ds)
-
-    # use xarray to open the file and return an xarray.Dataset object using scipy engine
-
-    ds = xr.open_dataset("/path/to/my/file.nc", engine="scipy")
-
-We recommend installing ``scipy`` via conda using the below given code:
-
-::
-
-    conda install scipy
-
-HDF5
-~~~~
-Use :py:func:`~xarray.open_dataset` to open an HDF5 file and return an xarray Dataset object.
-
-You should specify the ``engine`` keyword argument when reading HDF5 files with xarray, as there are multiple backends that can be used to read HDF5 files, and xarray may not always be able to automatically detect the correct one based on the file extension or file format.
-
-To read HDF5 files with xarray, you can use the :py:func:`~xarray.open_dataset` function from the ``h5netcdf`` backend, as follows:
-
-.. code:: python
-
-    import xarray as xr
-
-    # Open HDF5 file as an xarray Dataset
-
-    ds = xr.open_dataset("path/to/hdf5/file.hdf5", engine="h5netcdf")
-
-    # Print Dataset object
-
-    print(ds)
-
-We recommend you to install ``h5netcdf`` library using the below given code:
-
-::
-
-    conda install -c conda-forge h5netcdf
-
-If you want to use the ``netCDF4`` backend to read a file with a ".h5" extension (which is typically associated with HDF5 file format), you can specify the engine argument as follows:
-
-.. code:: python
-
-    ds = xr.open_dataset("path/to/file.h5", engine="netcdf4")
-
-GRIB
-~~~~
-You should specify the ``engine`` keyword argument when reading GRIB files with xarray, as there are multiple backends that can be used to read GRIB files, and xarray may not always be able to automatically detect the correct one based on the file extension or file format.
-
-Use the :py:func:`~xarray.open_dataset` function from the ``cfgrib`` package to open a GRIB file as an xarray Dataset.
-
-.. code:: python
-
-    import xarray as xr
-
-    # define the path to your GRIB file and the engine you want to use to open the file
-    # use ``open_dataset()`` to open the file with the specified engine and return an xarray.Dataset object
-
-    ds = xr.open_dataset("path/to/your/file.grib", engine="cfgrib")
-
-    # Print Dataset object
-
-    print(ds)
-
-We recommend installing ``cfgrib`` via conda using the below given code:
-
-::
-
-    conda install -c conda-forge cfgrib
-
-CSV
-~~~
-By default, xarray uses the built-in ``pandas`` library to read CSV files. In general, you don't need to specify the engine keyword argument when reading CSV files with xarray, as the default ``pandas`` engine is usually sufficient for most use cases. If you are working with very large CSV files or if you need to perform certain types of data processing that are not supported by the default ``pandas`` engine, you may want to use a different backend.
-In such cases, you can specify the engine argument when reading the CSV file with xarray.
-
-To read CSV files with xarray, use the :py:func:`~xarray.open_dataset` function and specify the path to the CSV file as follows:
-
-.. code:: python
-
-    import xarray as xr
-    import pandas as pd
-
-    # Load CSV file into pandas DataFrame using the "c" engine
-
-    df = pd.read_csv("your_file.csv", engine="c")
-
-    # Convert `:py:func:pandas` DataFrame to xarray.Dataset
-
-    ds = xr.Dataset.from_dataframe(df)
-
-    # Prints the resulting xarray dataset
-
-    print(ds)
-
-Zarr
-~~~~
-When opening a Zarr dataset with xarray, the ``engine`` is automatically detected based on the file extension or the type of input provided. If the dataset is stored in a directory with a ".zarr" extension, xarray will automatically use the "zarr" engine.
-
-To read zarr files with xarray, use the :py:func:`~xarray.open_dataset` function and specify the path to the zarr file as follows:
-
-.. code:: python
-
-    import xarray as xr
-
-    # use xarray to open the file and return an xarray.Dataset object using zarr engine
-
-    ds = xr.open_dataset("path/to/your/file.zarr", engine="zarr")
-
-    # Print Dataset object
-
-    print(ds)
-
-We recommend installing ``zarr`` via conda using the below given code:
-
-::
-
-    conda install -c conda-forge zarr
-
-There may be situations where you need to specify the engine manually using the ``engine`` keyword argument. For example, if you have a Zarr dataset stored in a file with a different extension (e.g., ".npy"), you will need to specify the engine as "zarr" explicitly when opening the dataset.
-
-Some packages may have additional functionality beyond what is shown here. You can refer to the documentation for each package for more information.
-
-How does xarray handle missing values?
---------------------------------------
-
-**xarray can handle missing values using ``np.nan``**
-
-- ``np.nan`` is  used to represent missing values in labeled arrays and datasets. It is a commonly used standard for representing missing or undefined numerical data in scientific computing. ``np.nan`` is a constant value in NumPy that represents "Not a Number" or missing values.
-
-- Most of xarray's computation methods are designed to automatically handle missing values appropriately.
-
-  For example, when performing operations like addition or multiplication on arrays that contain missing values, xarray will automatically ignore the missing values and only perform the operation on the valid data. This makes it easy to work with data that may contain missing or undefined values without having to worry about handling them explicitly.
-
-- Many of xarray's `aggregation methods <https://docs.xarray.dev/en/stable/user-guide/computation.html#aggregation>`_, such as ``sum()``, ``mean()``, ``min()``, ``max()``, and others, have a skipna argument that controls whether missing values (represented by NaN) should be skipped (True) or treated as NaN (False) when performing the calculation.
-
-  By default, ``skipna`` is set to ``True``, so missing values are ignored when computing the result. However, you can set ``skipna`` to ``False`` if you want missing values to be treated as NaN and included in the calculation.
-
-- On `plotting <https://docs.xarray.dev/en/stable/user-guide/plotting.html#missing-values>`_ an xarray dataset or array that contains missing values, xarray will simply leave the missing values as blank spaces in the plot.
-
-- We have a set of `methods <https://docs.xarray.dev/en/stable/user-guide/computation.html#missing-values>`_ for manipulating missing and filling values.
-
-How should I cite xarray?
--------------------------
-
-If you are using xarray and would like to cite it in academic publication, we
-would certainly appreciate it. We recommend two citations.
-
-  1. At a minimum, we recommend citing the xarray overview journal article,
-     published in the Journal of Open Research Software.
-
-     - Hoyer, S. & Hamman, J., (2017). xarray: N-D labeled Arrays and
-       Datasets in Python. Journal of Open Research Software. 5(1), p.10.
-       DOI: https://doi.org/10.5334/jors.148
-
-       Here’s an example of a BibTeX entry::
-
-           @article{hoyer2017xarray,
-             title     = {xarray: {N-D} labeled arrays and datasets in {Python}},
-             author    = {Hoyer, S. and J. Hamman},
-             journal   = {Journal of Open Research Software},
-             volume    = {5},
-             number    = {1},
-             year      = {2017},
-             publisher = {Ubiquity Press},
-             doi       = {10.5334/jors.148},
-             url       = {https://doi.org/10.5334/jors.148}
-           }
-
-  2. You may also want to cite a specific version of the xarray package. We
-     provide a `Zenodo citation and DOI <https://doi.org/10.5281/zenodo.598201>`_
-     for this purpose:
-
-        .. image:: https://zenodo.org/badge/doi/10.5281/zenodo.598201.svg
-           :target: https://doi.org/10.5281/zenodo.598201
-
-       An example BibTeX entry::
-
-           @misc{xarray_v0_8_0,
-                 author = {Stephan Hoyer and Clark Fitzgerald and Joe Hamman and others},
-                 title  = {xarray: v0.8.0},
-                 month  = aug,
-                 year   = 2016,
-                 doi    = {10.5281/zenodo.59499},
-                 url    = {https://doi.org/10.5281/zenodo.59499}
-                }
-
-.. _api-stability:
-
-How stable is Xarray's API?
----------------------------
-
-Xarray tries very hard to maintain backwards compatibility in our :ref:`api` between released versions.
-Whilst we do occasionally make breaking changes in order to improve the library,
-we `signpost changes <https://docs.xarray.dev/en/stable/contributing.html#backwards-compatibility>`_ with ``DeprecationWarnings`` for many releases in advance.
-(An exception is bugs - whose behaviour we try to fix as soon as we notice them.)
-Our `test-driven development practices <https://docs.xarray.dev/en/stable/contributing.html#test-driven-development-code-writing>`_ helps to ensure any accidental regressions are caught.
-This philosophy applies to everything in the `public API <https://docs.xarray.dev/en/stable/getting-started-guide/faq.html#what-parts-of-xarray-are-considered-public-api>`_.
-
-.. _public-api:
-
-What parts of xarray are considered public API?
------------------------------------------------
-
-As a rule, only functions/methods documented in our :ref:`api` are considered
-part of xarray's public API. Everything else (in particular, everything in
-``xarray.core`` that is not also exposed in the top level ``xarray`` namespace)
-is considered a private implementation detail that may change at any time.
-
-Objects that exist to facilitate xarray's fluent interface on ``DataArray`` and
-``Dataset`` objects are a special case. For convenience, we document them in
-the API docs, but only their methods and the ``DataArray``/``Dataset``
-methods/properties to construct them (e.g., ``.plot()``, ``.groupby()``,
-``.str``) are considered public API. Constructors and other details of the
-internal classes used to implemented them (i.e.,
-``xarray.plot.plotting._PlotMethods``, ``xarray.core.groupby.DataArrayGroupBy``,
-``xarray.core.accessor_str.StringAccessor``) are not.
diff -pruN 2025.03.1-8/doc/getting-started-guide/index.rst 2025.10.1-1/doc/getting-started-guide/index.rst
--- 2025.03.1-8/doc/getting-started-guide/index.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/getting-started-guide/index.rst	2025-10-10 10:38:05.000000000 +0000
@@ -2,14 +2,13 @@
 Getting Started
 ################
 
-The getting started guide aims to get you using xarray productively as quickly as possible.
-It is designed as an entry point for new users, and it provided an introduction to xarray's main concepts.
+The getting started guide aims to get you using Xarray productively as quickly as possible.
+It is designed as an entry point for new users, and it provides an introduction to Xarray's main concepts.
 
 .. toctree::
    :maxdepth: 2
-   :hidden:
 
    why-xarray
    installing
    quick-overview
-   faq
+   tutorials-and-videos
diff -pruN 2025.03.1-8/doc/getting-started-guide/installing.rst 2025.10.1-1/doc/getting-started-guide/installing.rst
--- 2025.03.1-8/doc/getting-started-guide/installing.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/getting-started-guide/installing.rst	2025-10-10 10:38:05.000000000 +0000
@@ -6,10 +6,10 @@ Installation
 Required dependencies
 ---------------------
 
-- Python (3.10 or later)
-- `numpy <https://www.numpy.org/>`__ (1.23 or later)
-- `packaging <https://packaging.pypa.io/en/latest/#>`__ (23.1 or later)
-- `pandas <https://pandas.pydata.org/>`__ (2.0 or later)
+- Python (3.11 or later)
+- `numpy <https://www.numpy.org/>`__ (1.26 or later)
+- `packaging <https://packaging.pypa.io/en/latest/#>`__ (24.1 or later)
+- `pandas <https://pandas.pydata.org/>`__ (2.2 or later)
 
 .. _optional-dependencies:
 
@@ -57,7 +57,7 @@ For plotting
 ~~~~~~~~~~~~
 
 - `matplotlib <https://matplotlib.org>`__: required for :ref:`plotting`
-- `cartopy <https://scitools.org.uk/cartopy>`__: recommended for :ref:`plot-maps`
+- `cartopy <https://cartopy.readthedocs.io>`__: recommended for :ref:`plot-maps`
 - `seaborn <https://seaborn.pydata.org>`__: for better
   color palettes
 - `nc-time-axis <https://nc-time-axis.readthedocs.io>`__: for plotting
diff -pruN 2025.03.1-8/doc/getting-started-guide/quick-overview.rst 2025.10.1-1/doc/getting-started-guide/quick-overview.rst
--- 2025.03.1-8/doc/getting-started-guide/quick-overview.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/getting-started-guide/quick-overview.rst	2025-10-10 10:38:05.000000000 +0000
@@ -8,7 +8,7 @@ documentation.
 
 To begin, import numpy, pandas and xarray using their customary abbreviations:
 
-.. ipython:: python
+.. jupyter-execute::
 
     import numpy as np
     import pandas as pd
@@ -20,20 +20,20 @@ Create a DataArray
 You can make a DataArray from scratch by supplying data in the form of a numpy
 array or list, with optional *dimensions* and *coordinates*:
 
-.. ipython:: python
+.. jupyter-execute::
 
     data = xr.DataArray(np.random.randn(2, 3), dims=("x", "y"), coords={"x": [10, 20]})
     data
 
 In this case, we have generated a 2D array, assigned the names *x* and *y* to the two dimensions respectively and associated two *coordinate labels* '10' and '20' with the two locations along the x dimension. If you supply a pandas :py:class:`~pandas.Series` or :py:class:`~pandas.DataFrame`, metadata is copied directly:
 
-.. ipython:: python
+.. jupyter-execute::
 
     xr.DataArray(pd.Series(range(3), index=list("abc"), name="foo"))
 
 Here are the key properties for a ``DataArray``:
 
-.. ipython:: python
+.. jupyter-execute::
 
     # like in pandas, values is a numpy array that you can modify in-place
     data.values
@@ -48,7 +48,7 @@ Indexing
 
 Xarray supports four kinds of indexing. Since we have assigned coordinate labels to the x dimension we can use label-based indexing along that dimension just like pandas. The four examples below all yield the same result (the value at ``x=10``) but at varying levels of convenience and intuitiveness.
 
-.. ipython:: python
+.. jupyter-execute::
 
     # positional and by integer label, like numpy
     data[0, :]
@@ -71,7 +71,7 @@ Attributes
 
 While you're setting up your DataArray, it's often a good idea to set metadata attributes. A useful choice is to set ``data.attrs['long_name']`` and ``data.attrs['units']`` since xarray will use these, if present, to automatically label your plots. These special names were chosen following the `NetCDF Climate and Forecast (CF) Metadata Conventions <https://cfconventions.org/cf-conventions/cf-conventions.html>`_. ``attrs`` is just a Python dictionary, so you can assign anything you wish.
 
-.. ipython:: python
+.. jupyter-execute::
 
     data.attrs["long_name"] = "random velocity"
     data.attrs["units"] = "metres/sec"
@@ -87,7 +87,7 @@ Computation
 
 Data arrays work very similarly to numpy ndarrays:
 
-.. ipython:: python
+.. jupyter-execute::
 
     data + 10
     np.sin(data)
@@ -98,14 +98,14 @@ Data arrays work very similarly to numpy
 However, aggregation operations can use dimension names instead of axis
 numbers:
 
-.. ipython:: python
+.. jupyter-execute::
 
     data.mean(dim="x")
 
 Arithmetic operations broadcast based on dimension name. This means you don't
 need to insert dummy dimensions for alignment:
 
-.. ipython:: python
+.. jupyter-execute::
 
     a = xr.DataArray(np.random.randn(3), [data.coords["y"]])
     b = xr.DataArray(np.random.randn(4), dims="z")
@@ -118,24 +118,24 @@ need to insert dummy dimensions for alig
 It also means that in most cases you do not need to worry about the order of
 dimensions:
 
-.. ipython:: python
+.. jupyter-execute::
 
     data - data.T
 
 Operations also align based on index labels:
 
-.. ipython:: python
+.. jupyter-execute::
 
     data[:-1] - data[:1]
 
-For more, see :ref:`comput`.
+For more, see :ref:`compute`.
 
 GroupBy
 -------
 
 Xarray supports grouped operations using a very similar API to pandas (see :ref:`groupby`):
 
-.. ipython:: python
+.. jupyter-execute::
 
     labels = xr.DataArray(["E", "F", "E"], [data.coords["y"]], name="labels")
     labels
@@ -147,9 +147,8 @@ Plotting
 
 Visualizing your datasets is quick and convenient:
 
-.. ipython:: python
+.. jupyter-execute::
 
-    @savefig plotting_quick_overview.png
     data.plot()
 
 Note the automatic labeling with names and units. Our effort in adding metadata attributes has paid off! Many aspects of these figures are customizable: see :ref:`plotting`.
@@ -159,7 +158,7 @@ pandas
 
 Xarray objects can be easily converted to and from pandas objects using the :py:meth:`~xarray.DataArray.to_series`, :py:meth:`~xarray.DataArray.to_dataframe` and :py:meth:`~pandas.DataFrame.to_xarray` methods:
 
-.. ipython:: python
+.. jupyter-execute::
 
     series = data.to_series()
     series
@@ -174,7 +173,7 @@ Datasets
 objects. You can think of it as a multi-dimensional generalization of the
 :py:class:`pandas.DataFrame`:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds = xr.Dataset(dict(foo=data, bar=("x", [1, 2]), baz=np.pi))
     ds
@@ -182,7 +181,7 @@ objects. You can think of it as a multi-
 
 This creates a dataset with three DataArrays named ``foo``, ``bar`` and ``baz``. Use dictionary or dot indexing to pull out ``Dataset`` variables as ``DataArray`` objects but note that assignment only works with dictionary indexing:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds["foo"]
     ds.foo
@@ -192,7 +191,7 @@ When creating ``ds``, we specified that
 
 For example, when creating ``ds`` xarray automatically *aligns* ``bar`` with ``DataArray`` ``foo``, i.e., they share the same coordinate system so that ``ds.bar['x'] == ds.foo['x'] == ds['x']``. Consequently, the following works without explicitly specifying the coordinate ``x`` when creating ``ds['bar']``:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.bar.sel(x=10)
 
@@ -212,14 +211,14 @@ model looks very similar to a netCDF fil
 You can directly read and write xarray objects to disk using :py:meth:`~xarray.Dataset.to_netcdf`, :py:func:`~xarray.open_dataset` and
 :py:func:`~xarray.open_dataarray`:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.to_netcdf("example.nc")
     reopened = xr.open_dataset("example.nc")
     reopened
 
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
 
     import os
 
@@ -239,7 +238,7 @@ DataTrees
 
 Let's first make some example xarray datasets:
 
-.. ipython:: python
+.. jupyter-execute::
 
     import numpy as np
     import xarray as xr
@@ -259,7 +258,7 @@ Let's first make some example xarray dat
 
 Now we'll put these datasets into a hierarchical DataTree:
 
-.. ipython:: python
+.. jupyter-execute::
 
     dt = xr.DataTree.from_dict(
         {"simulation/coarse": ds, "simulation/fine": ds2, "/": ds3}
@@ -290,26 +289,26 @@ addition of requiring parent-descendent
 We created the subgroups using a filesystem-like syntax, and accessing groups works the same way.  We can access
 individual DataArrays in a similar fashion.
 
-.. ipython:: python
+.. jupyter-execute::
 
     dt["simulation/coarse/foo"]
 
 We can also view the data in a particular group as a read-only :py:class:`~xarray.Datatree.DatasetView` using :py:attr:`xarray.Datatree.dataset`:
 
-.. ipython:: python
+.. jupyter-execute::
 
     dt["simulation/coarse"].dataset
 
 We can get a copy of the :py:class:`~xarray.Dataset` including the inherited coordinates by calling the :py:class:`~xarray.datatree.to_dataset` method:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds_inherited = dt["simulation/coarse"].to_dataset()
     ds_inherited
 
 And you can get a copy of just the node local values of :py:class:`~xarray.Dataset` by setting the ``inherit`` keyword to ``False``:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds_node_local = dt["simulation/coarse"].to_dataset(inherit=False)
     ds_node_local
@@ -322,7 +321,7 @@ And you can get a copy of just the node
 
 .. Operations map over subtrees, so we can take a mean over the ``x`` dimension of both the ``fine`` and ``coarse`` groups just by:
 
-.. .. ipython:: python
+.. .. jupyter-execute::
 
 ..     avg = dt["simulation"].mean(dim="x")
 ..     avg
diff -pruN 2025.03.1-8/doc/getting-started-guide/tutorials-and-videos.rst 2025.10.1-1/doc/getting-started-guide/tutorials-and-videos.rst
--- 2025.03.1-8/doc/getting-started-guide/tutorials-and-videos.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/getting-started-guide/tutorials-and-videos.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,34 @@
+
+Tutorials and Videos
+====================
+
+There is an abundance of tutorials and videos available for learning how to use *xarray*.
+Often, these tutorials are taught to workshop attendees at conferences or other events.
+We highlight a number of these resources below, but this is by no means an exhaustive list!
+
+Tutorials
+----------
+
+- `Xarray's Tutorials`_ repository
+- The `UW eScience Institute's Geohackweek`_ tutorial on xarray for geospatial data scientists.
+- `Nicolas Fauchereau's 2015 tutorial`_ on xarray for netCDF users.
+
+
+Videos
+-------
+
+.. include:: ../videos-gallery.txt
+
+
+Books, Chapters and Articles
+-----------------------------
+
+- Stephan Hoyer and Joe Hamman's `Journal of Open Research Software paper`_ describing the xarray project.
+
+
+.. _Xarray's Tutorials: https://xarray-contrib.github.io/xarray-tutorial/
+.. _Journal of Open Research Software paper: https://doi.org/10.5334/jors.148
+.. _UW eScience Institute's Geohackweek : https://geohackweek.github.io/nDarrays/
+.. _tutorial: https://github.com/Unidata/unidata-users-workshop/blob/master/notebooks/xray-tutorial.ipynb
+.. _with answers: https://github.com/Unidata/unidata-users-workshop/blob/master/notebooks/xray-tutorial-with-answers.ipynb
+.. _Nicolas Fauchereau's 2015 tutorial: https://nbviewer.iPython.org/github/nicolasfauchereau/metocean/blob/master/notebooks/xray.ipynb
diff -pruN 2025.03.1-8/doc/help-diagram.rst 2025.10.1-1/doc/help-diagram.rst
--- 2025.03.1-8/doc/help-diagram.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/help-diagram.rst	1970-01-01 00:00:00.000000000 +0000
@@ -1,75 +0,0 @@
-Getting Help
-============
-
-Navigating the wealth of resources available for Xarray can be overwhelming.
-We've created this flow chart to help guide you towards the best way to get help, depending on what you're working towards.
-The links to each resource are provided below the diagram.
-Regardless of how you interact with us, we're always thrilled to hear from you!
-
-.. mermaid::
-    :alt: Flowchart illustrating the different ways to access help using or contributing to Xarray.
-
-    flowchart TD
-        intro[Welcome to Xarray! How can we help?]:::quesNodefmt
-        usage(["fa:fa-chalkboard-user Xarray Tutorials
-            fab:fa-readme Xarray Docs
-            fab:fa-google Google/fab:fa-stack-overflow Stack Exchange
-            fa:fa-robot Ask AI/a Language Learning Model (LLM)"]):::ansNodefmt
-        API([fab:fa-readme Xarray Docs
-            fab:fa-readme extension's docs]):::ansNodefmt
-        help([fab:fa-github Xarray Discussions
-            fab:fa-discord Xarray Discord
-            fa:fa-users Xarray Office Hours
-            fa:fa-globe Pangeo Discourse]):::ansNodefmt
-        bug([Report and Propose here:
-            fab:fa-github Xarray Issues]):::ansNodefmt
-        contrib([fa:fa-book-open Xarray Contributor's Guide]):::ansNodefmt
-        pr(["fab:fa-github Pull Request (PR)"]):::ansNodefmt
-        dev([fab:fa-github Comment on your PR
-            fa:fa-users Developer's Meeting]):::ansNodefmt
-        report[Thanks for letting us know!]:::quesNodefmt
-        merged[fa:fa-hands-clapping Your PR was merged.
-            Thanks for contributing to Xarray!]:::quesNodefmt
-
-
-        intro -->|How do I use Xarray?| usage
-        usage -->|"with extensions (like Dask)"| API
-
-        usage -->|I'd like some more help| help
-        intro -->|I found a bug| bug
-        intro -->|I'd like to make a small change| contrib
-        subgraph bugcontrib[Bugs and Contributions]
-            bug
-            contrib
-            bug -->|I just wanted to tell you| report
-            bug<-->|I'd like to fix the bug!| contrib
-            pr -->|my PR was approved| merged
-        end
-
-
-        intro -->|I wish Xarray could...| bug
-
-
-        pr <-->|my PR is quiet| dev
-        contrib -->pr
-
-        classDef quesNodefmt fill:#9DEEF4,stroke:#206C89
-
-        classDef ansNodefmt fill:#FFAA05,stroke:#E37F17
-
-        classDef boxfmt fill:#FFF5ED,stroke:#E37F17
-        class bugcontrib boxfmt
-
-        linkStyle default font-size:20pt,color:#206C89
-
-
-- `Xarray Tutorials <https://tutorial.xarray.dev/>`__
-- `Xarray Docs <https://docs.xarray.dev/en/stable/>`__
-- `Google/Stack Exchange <https://stackoverflow.com/questions/tagged/python-xarray>`__
-- `Xarray Discussions <https://github.com/pydata/xarray/discussions>`__
-- `Xarray Discord <https://discord.com/invite/wEKPCt4PDu>`__
-- `Xarray Office Hours <https://github.com/pydata/xarray/discussions/categories/office-hours>`__
-- `Pangeo Discourse <https://discourse.pangeo.io/>`__
-- `Xarray Issues <https://github.com/pydata/xarray/issues>`__
-- `Xarray Contributors Guide <https://docs.xarray.dev/en/stable/contributing.html>`__
-- `Developer's Meeting <https://docs.xarray.dev/en/stable/developers-meeting.html>`__
diff -pruN 2025.03.1-8/doc/howdoi.rst 2025.10.1-1/doc/howdoi.rst
--- 2025.03.1-8/doc/howdoi.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/howdoi.rst	1970-01-01 00:00:00.000000000 +0000
@@ -1,77 +0,0 @@
-.. currentmodule:: xarray
-
-.. _howdoi:
-
-How do I ...
-============
-
-.. list-table::
-   :header-rows: 1
-   :widths: 40 60
-
-   * - How do I...
-     - Solution
-   * - add a DataArray to my dataset as a new variable
-     - ``my_dataset[varname] = my_dataArray`` or :py:meth:`Dataset.assign` (see also :ref:`dictionary_like_methods`)
-   * - add variables from other datasets to my dataset
-     - :py:meth:`Dataset.merge`
-   * - add a new dimension and/or coordinate
-     - :py:meth:`DataArray.expand_dims`, :py:meth:`Dataset.expand_dims`
-   * - add a new coordinate variable
-     - :py:meth:`DataArray.assign_coords`
-   * - change a data variable to a coordinate variable
-     - :py:meth:`Dataset.set_coords`
-   * - change the order of dimensions
-     - :py:meth:`DataArray.transpose`, :py:meth:`Dataset.transpose`
-   * - reshape dimensions
-     - :py:meth:`DataArray.stack`, :py:meth:`Dataset.stack`, :py:meth:`Dataset.coarsen.construct`, :py:meth:`DataArray.coarsen.construct`
-   * - remove a variable from my object
-     - :py:meth:`Dataset.drop_vars`, :py:meth:`DataArray.drop_vars`
-   * - remove dimensions of length 1 or 0
-     - :py:meth:`DataArray.squeeze`, :py:meth:`Dataset.squeeze`
-   * - remove all variables with a particular dimension
-     - :py:meth:`Dataset.drop_dims`
-   * - convert non-dimension coordinates to data variables or remove them
-     - :py:meth:`DataArray.reset_coords`, :py:meth:`Dataset.reset_coords`
-   * - rename a variable, dimension or coordinate
-     - :py:meth:`Dataset.rename`, :py:meth:`DataArray.rename`, :py:meth:`Dataset.rename_vars`, :py:meth:`Dataset.rename_dims`,
-   * - convert a DataArray to Dataset or vice versa
-     - :py:meth:`DataArray.to_dataset`, :py:meth:`Dataset.to_dataarray`, :py:meth:`Dataset.to_stacked_array`, :py:meth:`DataArray.to_unstacked_dataset`
-   * - extract variables that have certain attributes
-     - :py:meth:`Dataset.filter_by_attrs`
-   * - extract the underlying array (e.g. NumPy or Dask arrays)
-     - :py:attr:`DataArray.data`
-   * - convert to and extract the underlying NumPy array
-     - :py:attr:`DataArray.to_numpy`
-   * - convert to a pandas DataFrame
-     - :py:attr:`Dataset.to_dataframe`
-   * - sort values
-     - :py:attr:`Dataset.sortby`
-   * - find out if my xarray object is wrapping a Dask Array
-     - :py:func:`dask.is_dask_collection`
-   * - know how much memory my object requires
-     - :py:attr:`DataArray.nbytes`, :py:attr:`Dataset.nbytes`
-   * - Get axis number for a dimension
-     - :py:meth:`DataArray.get_axis_num`
-   * - convert a possibly irregularly sampled timeseries to a regularly sampled timeseries
-     - :py:meth:`DataArray.resample`, :py:meth:`Dataset.resample` (see :ref:`resampling` for more)
-   * - apply a function on all data variables in a Dataset
-     - :py:meth:`Dataset.map`
-   * - write xarray objects with complex values to a netCDF file
-     - :py:func:`Dataset.to_netcdf`, :py:func:`DataArray.to_netcdf` specifying ``engine="h5netcdf"`` or :py:func:`Dataset.to_netcdf`, :py:func:`DataArray.to_netcdf` specifying ``engine="netCDF4", auto_complex=True``
-   * - make xarray objects look like other xarray objects
-     - :py:func:`~xarray.ones_like`, :py:func:`~xarray.zeros_like`, :py:func:`~xarray.full_like`, :py:meth:`Dataset.reindex_like`, :py:meth:`Dataset.interp_like`, :py:meth:`Dataset.broadcast_like`, :py:meth:`DataArray.reindex_like`, :py:meth:`DataArray.interp_like`, :py:meth:`DataArray.broadcast_like`
-   * - Make sure my datasets have values at the same coordinate locations
-     - ``xr.align(dataset_1, dataset_2, join="exact")``
-   * - replace NaNs with other values
-     - :py:meth:`Dataset.fillna`, :py:meth:`Dataset.ffill`, :py:meth:`Dataset.bfill`, :py:meth:`Dataset.interpolate_na`, :py:meth:`DataArray.fillna`, :py:meth:`DataArray.ffill`, :py:meth:`DataArray.bfill`, :py:meth:`DataArray.interpolate_na`
-   * - extract the year, month, day or similar from a DataArray of time values
-     - ``obj.dt.month`` for example where ``obj`` is a :py:class:`~xarray.DataArray` containing ``datetime64`` or ``cftime`` values. See :ref:`dt_accessor` for more.
-   * - round off time values to a specified frequency
-     - ``obj.dt.ceil``, ``obj.dt.floor``, ``obj.dt.round``. See :ref:`dt_accessor` for more.
-   * - make a mask that is ``True`` where an object contains any of the values in a array
-     - :py:meth:`Dataset.isin`, :py:meth:`DataArray.isin`
-   * - Index using a boolean mask
-     - :py:meth:`Dataset.query`, :py:meth:`DataArray.query`, :py:meth:`Dataset.where`, :py:meth:`DataArray.where`
-   * - preserve ``attrs`` during (most) xarray operations
-     - ``xr.set_options(keep_attrs=True)``
diff -pruN 2025.03.1-8/doc/index.rst 2025.10.1-1/doc/index.rst
--- 2025.03.1-8/doc/index.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/index.rst	2025-10-10 10:38:05.000000000 +0000
@@ -1,3 +1,5 @@
+:html_theme.sidebar_secondary.remove: true
+
 .. module:: xarray
 
 Xarray documentation
@@ -6,86 +8,67 @@ Xarray documentation
 Xarray makes working with labelled multi-dimensional arrays in Python simple,
 efficient, and fun!
 
+**Version**: |version| - :ref:`whats-new`
+
 **Useful links**:
 `Home <https://xarray.dev>`__ |
 `Code Repository <https://github.com/pydata/xarray>`__ |
 `Issues <https://github.com/pydata/xarray/issues>`__ |
 `Discussions <https://github.com/pydata/xarray/discussions>`__ |
 `Releases <https://github.com/pydata/xarray/releases>`__ |
+`Tutorial <https://tutorial.xarray.dev>`__ |
 `Stack Overflow <https://stackoverflow.com/questions/tagged/python-xarray>`__ |
-`Mailing List <https://groups.google.com/g/xarray>`__ |
 `Blog <https://xarray.dev/blog>`__ |
-`Tutorials <https://tutorial.xarray.dev/>`__
-
 
 .. grid:: 1 1 2 2
     :gutter: 2
 
-    .. grid-item-card:: Getting started
+    .. grid-item-card:: Get started!
         :img-top: _static/index_getting_started.svg
+        :class-card: intro-card
         :link: getting-started-guide/index
         :link-type: doc
 
-        New to *xarray*? Check out the getting started guides. They contain an
-        introduction to *Xarray's* main concepts and links to additional tutorials.
+        *New to Xarray?*
+        Start here with our installation instructions and a brief overview of Xarray.
 
     .. grid-item-card::  User guide
         :img-top: _static/index_user_guide.svg
+        :class-card: intro-card
         :link: user-guide/index
         :link-type: doc
 
-        The user guide provides in-depth information on the
-        key concepts of Xarray with useful background information and explanation.
+        *Ready to deepen your understanding of Xarray?*
+        Visit the user guide for detailed explanations of the data model, common computational patterns, and more.
 
     .. grid-item-card::  API reference
         :img-top: _static/index_api.svg
+        :class-card: intro-card
         :link: api
         :link-type: doc
 
-        The reference guide contains a detailed description of the Xarray API.
-        The reference describes how the methods work and which parameters can
-        be used. It assumes that you have an understanding of the key concepts.
+        *Need to learn more about a specific Xarray function?*
+        Go here to review the documentation of all public functions and classes in Xarray.
 
-    .. grid-item-card::  Developer guide
+    .. grid-item-card::  Contribute
         :img-top: _static/index_contribute.svg
-        :link: contributing
+        :class-card: intro-card
+        :link: contribute/contributing
         :link-type: doc
 
-        Saw a typo in the documentation? Want to improve existing functionalities?
-        The contributing guidelines will guide you through the process of improving
-        Xarray.
+        *Saw a typo in the documentation? Want to improve existing functionalities?*
+        Please review our guide on improving Xarray.
 
 .. toctree::
    :maxdepth: 2
    :hidden:
    :caption: For users
 
-   Getting Started <getting-started-guide/index>
+   Get Started <getting-started-guide/index>
    User Guide <user-guide/index>
+   Tutorial <https://tutorial.xarray.dev>
    Gallery <gallery>
-   Tutorials & Videos <tutorials-and-videos>
    API Reference <api>
-   How do I ... <howdoi>
-   Getting Help <help-diagram>
-   Ecosystem <ecosystem>
-
-.. toctree::
-   :maxdepth: 2
-   :hidden:
-   :caption: For developers/contributors
-
-   Contributing Guide <contributing>
-   Xarray Internals <internals/index>
-   Development Roadmap <roadmap>
-   Team <https://xarray.dev/team>
-   Developers Meeting <developers-meeting>
-   What’s New <whats-new>
-   GitHub repository <https://github.com/pydata/xarray>
-
-.. toctree::
-   :maxdepth: 1
-   :hidden:
-   :caption: Community
-
-   GitHub discussions <https://github.com/pydata/xarray/discussions>
-   StackOverflow <https://stackoverflow.com/questions/tagged/python-xarray>
+   Get Help <get-help/help-diagram>
+   Development <contribute/index>
+   Release Notes <whats-new>
diff -pruN 2025.03.1-8/doc/internals/duck-arrays-integration.rst 2025.10.1-1/doc/internals/duck-arrays-integration.rst
--- 2025.03.1-8/doc/internals/duck-arrays-integration.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/internals/duck-arrays-integration.rst	2025-10-10 10:38:05.000000000 +0000
@@ -70,18 +70,25 @@ To avoid duplicated information, this me
 :term:`dtype`. For example, the string representation of a ``dask`` array or a
 ``sparse`` matrix would be:
 
-.. ipython:: python
+.. jupyter-execute::
 
     import dask.array as da
     import xarray as xr
+    import numpy as np
     import sparse
 
+.. jupyter-execute::
+
     a = da.linspace(0, 1, 20, chunks=2)
     a
 
+.. jupyter-execute::
+
     b = np.eye(10)
     b[[5, 7, 3, 0], [6, 8, 2, 9]] = 2
     b = sparse.COO.from_numpy(b)
     b
 
+.. jupyter-execute::
+
     xr.Dataset(dict(a=("x", a), b=(("y", "z"), b)))
diff -pruN 2025.03.1-8/doc/internals/extending-xarray.rst 2025.10.1-1/doc/internals/extending-xarray.rst
--- 2025.03.1-8/doc/internals/extending-xarray.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/internals/extending-xarray.rst	2025-10-10 10:38:05.000000000 +0000
@@ -4,10 +4,11 @@
 Extending xarray using accessors
 ================================
 
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
 
     import xarray as xr
+    import numpy as np
 
 
 Xarray is designed as a general purpose library and hence tries to avoid
@@ -89,15 +90,18 @@ reasons:
 
 Back in an interactive IPython session, we can use these properties:
 
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
 
     exec(open("examples/_code/accessor_example.py").read())
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds = xr.Dataset({"longitude": np.linspace(0, 10), "latitude": np.linspace(0, 20)})
     ds.geo.center
+
+.. jupyter-execute::
+
     ds.geo.plot()
 
 The intent here is that libraries that extend xarray could add such an accessor
diff -pruN 2025.03.1-8/doc/internals/how-to-add-new-backend.rst 2025.10.1-1/doc/internals/how-to-add-new-backend.rst
--- 2025.03.1-8/doc/internals/how-to-add-new-backend.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/internals/how-to-add-new-backend.rst	2025-10-10 10:38:05.000000000 +0000
@@ -221,21 +221,27 @@ performs the inverse transformation.
 
 In the following an example on how to use the coders ``decode`` method:
 
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
 
     import xarray as xr
+    import numpy as np
 
-.. ipython:: python
+.. jupyter-execute::
 
     var = xr.Variable(
         dims=("x",), data=np.arange(10.0), attrs={"scale_factor": 10, "add_offset": 2}
     )
     var
 
+.. jupyter-execute::
+
     coder = xr.coding.variables.CFScaleOffsetCoder()
     decoded_var = coder.decode(var)
     decoded_var
+
+.. jupyter-execute::
+
     decoded_var.encoding
 
 Some of the transformations can be common to more backends, so before
@@ -325,10 +331,12 @@ information on plugins.
 How to support lazy loading
 +++++++++++++++++++++++++++
 
-If you want to make your backend effective with big datasets, then you should
-support lazy loading.
-Basically, you shall replace the :py:class:`numpy.ndarray` inside the
-variables with a custom class that supports lazy loading indexing.
+If you want to make your backend effective with big datasets, then you should take advantage of xarray's
+support for lazy loading and indexing.
+
+Basically, when your backend constructs the ``Variable`` objects,
+you need to replace the :py:class:`numpy.ndarray` inside the
+variables with a custom :py:class:`~xarray.backends.BackendArray` subclass that supports lazy loading and indexing.
 See the example below:
 
 .. code-block:: python
@@ -339,25 +347,27 @@ See the example below:
 
 Where:
 
-- :py:class:`~xarray.core.indexing.LazilyIndexedArray` is a class
-  provided by Xarray that manages the lazy loading.
-- ``MyBackendArray`` shall be implemented by the backend and shall inherit
+- :py:class:`~xarray.core.indexing.LazilyIndexedArray` is a wrapper class
+  provided by Xarray that manages the lazy loading and indexing.
+- ``MyBackendArray`` should be implemented by the backend and must inherit
   from :py:class:`~xarray.backends.BackendArray`.
 
 BackendArray subclassing
 ^^^^^^^^^^^^^^^^^^^^^^^^
 
-The BackendArray subclass shall implement the following method and attributes:
+The BackendArray subclass must implement the following method and attributes:
 
-- the ``__getitem__`` method that takes in input an index and returns a
-  `NumPy <https://numpy.org/>`__ array
-- the ``shape`` attribute
+- the ``__getitem__`` method that takes an index as an input and returns a
+  `NumPy <https://numpy.org/>`__ array,
+- the ``shape`` attribute,
 - the ``dtype`` attribute.
 
-Xarray supports different type of :doc:`/user-guide/indexing`, that can be
-grouped in three types of indexes
+It may also optionally implement an ``async_getitem`` method (see the *Async support* section below).
+
+Xarray supports different types of :doc:`/user-guide/indexing`, which can be
+grouped into three types of indexes:
 :py:class:`~xarray.core.indexing.BasicIndexer`,
-:py:class:`~xarray.core.indexing.OuterIndexer` and
+:py:class:`~xarray.core.indexing.OuterIndexer`, and
 :py:class:`~xarray.core.indexing.VectorizedIndexer`.
 This implies that the implementation of the method ``__getitem__`` can be tricky.
 In order to simplify this task, Xarray provides a helper function,
@@ -413,8 +423,22 @@ input the ``key``, the array ``shape`` a
 For more details see
 :py:class:`~xarray.core.indexing.IndexingSupport` and :ref:`RST indexing`.
 
+Async support
+^^^^^^^^^^^^^
+
+Backends can also optionally support loading data asynchronously via xarray's asynchronous loading methods
+(e.g. ``xarray.Dataset.load_async``).
+To support async loading, the ``BackendArray`` subclass must additionally implement the ``BackendArray.async_getitem`` method.
+
+Note that implementing this method is only necessary if you want to be able to load data from different xarray objects concurrently.
+Even without this method, your ``BackendArray`` implementation is still free to load chunks of data for a single ``Variable`` concurrently,
+so long as it does so behind the synchronous ``__getitem__`` interface.
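+
+As a rough sketch, an async-capable ``BackendArray`` can simply mirror its synchronous
+indexing path behind an ``async`` method (the constructor arguments and the omitted read
+logic below are purely illustrative):
+
+.. code-block:: python
+
+    from xarray.backends import BackendArray
+    from xarray.core import indexing
+
+
+    class MyBackendArray(BackendArray):
+        def __init__(self, shape, dtype):
+            self.shape = shape
+            self.dtype = dtype
+
+        def __getitem__(self, key):
+            # synchronous entry point; xarray calls this for regular (blocking) loads
+            return indexing.explicit_indexing_adapter(
+                key,
+                self.shape,
+                indexing.IndexingSupport.BASIC,
+                self._raw_indexing_method,
+            )
+
+        async def async_getitem(self, key):
+            # awaitable counterpart; here it just reuses the synchronous path,
+            # but a real backend could await a non-blocking read instead
+            return indexing.explicit_indexing_adapter(
+                key,
+                self.shape,
+                indexing.IndexingSupport.BASIC,
+                self._raw_indexing_method,
+            )
+
+        def _raw_indexing_method(self, key: tuple):
+            # read and return the requested data from the underlying store
+            ...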
+
+Dask support
+^^^^^^^^^^^^
+
 In order to support `Dask Distributed <https://distributed.dask.org/>`__ and
-:py:mod:`multiprocessing`, ``BackendArray`` subclass should be serializable
+:py:mod:`multiprocessing`, the ``BackendArray`` subclass should be serializable
 either with :ref:`io.pickle` or
 `cloudpickle <https://github.com/cloudpipe/cloudpickle>`__.
 That implies that all the reference to open files should be dropped. For
@@ -432,42 +456,61 @@ In the ``BASIC`` indexing support, numbe
 
 Example:
 
-.. ipython::
-    :verbatim:
+.. jupyter-input::
+
+    # () shall return the full array
+    backend_array._raw_indexing_method(())
+
+.. jupyter-output::
+
+    array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]])
+
+.. jupyter-input::
 
-    In [1]: # () shall return the full array
-       ...: backend_array._raw_indexing_method(())
-    Out[1]: array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]])
-
-    In [2]: # shall support integers
-       ...: backend_array._raw_indexing_method(1, 1)
-    Out[2]: 5
-
-    In [3]: # shall support slices
-       ...: backend_array._raw_indexing_method(slice(0, 3), slice(2, 4))
-    Out[3]: array([[2, 3], [6, 7], [10, 11]])
+    # shall support integers
+    backend_array._raw_indexing_method(1, 1)
+
+.. jupyter-output::
+
+    5
+
+.. jupyter-input::
+
+    # shall support slices
+    backend_array._raw_indexing_method(slice(0, 3), slice(2, 4))
+
+.. jupyter-output::
+
+    array([[2, 3], [6, 7], [10, 11]])
 
 **OUTER**
 
 The ``OUTER`` indexing shall support number, slices and in addition it shall
-support also lists of integers. The the outer indexing is equivalent to
+also support lists of integers. The outer indexing is equivalent to
 combining multiple input list with ``itertools.product()``:
 
-.. ipython::
-    :verbatim:
+.. jupyter-input::
+
+    backend_array._raw_indexing_method([0, 1], [0, 1, 2])
+
+.. jupyter-output::
 
-    In [1]: backend_array._raw_indexing_method([0, 1], [0, 1, 2])
-    Out[1]: array([[0, 1, 2], [4, 5, 6]])
+    array([[0, 1, 2], [4, 5, 6]])
+
+.. jupyter-input::
 
     # shall support integers
-    In [2]: backend_array._raw_indexing_method(1, 1)
-    Out[2]: 5
+    backend_array._raw_indexing_method(1, 1)
+
+.. jupyter-output::
+
+    5
 
 
 **OUTER_1VECTOR**
 
 The ``OUTER_1VECTOR`` indexing shall supports number, slices and at most one
-list. The behaviour with the list shall be the same of ``OUTER`` indexing.
+list. The behaviour with the list shall be the same as ``OUTER`` indexing.
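+
+For example, assuming the same 3x4 example array shown for ``BASIC`` above, a single
+list combined with a slice would be handled like this (hypothetical call):
+
+.. jupyter-input::
+
+    backend_array._raw_indexing_method([0, 2], slice(0, 2))
+
+.. jupyter-output::
+
+    array([[0, 1], [8, 9]])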
 
 If you support more complex indexing as explicit indexing or
 numpy indexing, you can have a look to the implementation of Zarr backend and Scipy backend,
diff -pruN 2025.03.1-8/doc/internals/how-to-create-custom-index.rst 2025.10.1-1/doc/internals/how-to-create-custom-index.rst
--- 2025.03.1-8/doc/internals/how-to-create-custom-index.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/internals/how-to-create-custom-index.rst	2025-10-10 10:38:05.000000000 +0000
@@ -53,8 +53,8 @@ number, order and dimensions of the coor
 responsibility of the index to check the consistency and validity of those input
 coordinates.
 
-For example, :py:class:`~xarray.core.indexes.PandasIndex` accepts only one coordinate and
-:py:class:`~xarray.core.indexes.PandasMultiIndex` accepts one or more 1-dimensional coordinates that must all
+For example, :py:class:`~xarray.indexes.PandasIndex` accepts only one coordinate and
+:py:class:`~xarray.indexes.PandasMultiIndex` accepts one or more 1-dimensional coordinates that must all
 share the same dimension. Other, custom indexes need not have the same
 constraints, e.g.,
 
diff -pruN 2025.03.1-8/doc/internals/internal-design.rst 2025.10.1-1/doc/internals/internal-design.rst
--- 2025.03.1-8/doc/internals/internal-design.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/internals/internal-design.rst	2025-10-10 10:38:05.000000000 +0000
@@ -1,12 +1,12 @@
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
 
     import numpy as np
     import pandas as pd
     import xarray as xr
 
     np.random.seed(123456)
-    np.set_printoptions(threshold=20)
+    np.set_printoptions(threshold=10, edgeitems=2)
 
 .. _internal design:
 
@@ -86,9 +86,9 @@ DataArray Objects
 
 The simplest data structure used by most users is :py:class:`~xarray.DataArray`.
 A :py:class:`~xarray.DataArray` is a composite object consisting of multiple
-:py:class:`~xarray.core.variable.Variable` objects which store related data.
+:py:class:`~xarray.Variable` objects which store related data.
 
-A single :py:class:`~xarray.core.Variable` is referred to as the "data variable", and stored under the :py:attr:`~xarray.DataArray.variable`` attribute.
+A single :py:class:`~xarray.Variable` is referred to as the "data variable", and stored under the :py:attr:`~xarray.DataArray.variable` attribute.
 A :py:class:`~xarray.DataArray` inherits all of the properties of this data variable, i.e. ``dims``, ``data``, ``attrs`` and ``encoding``,
 all of which are implemented by forwarding on to the underlying ``Variable`` object.
 
@@ -111,7 +111,7 @@ Finally a :py:class:`~xarray.DataArray`
 variable but is stored on the wrapping ``DataArray`` class.
 The ``name`` attribute is primarily used when one or more :py:class:`~xarray.DataArray` objects are promoted into a :py:class:`~xarray.Dataset`
 (e.g. via :py:meth:`~xarray.DataArray.to_dataset`).
-Note that the underlying :py:class:`~xarray.core.Variable` objects are all unnamed, so they can always be referred to uniquely via a
+Note that the underlying :py:class:`~xarray.Variable` objects are all unnamed, so they can always be referred to uniquely via a
 dict-like mapping.
 
 .. _internal design.dataset:
@@ -150,7 +150,7 @@ Lazy Loading
 If we open a ``Variable`` object from disk using :py:func:`~xarray.open_dataset` we can see that the actual values of
 the array wrapped by the data variable are not displayed.
 
-.. ipython:: python
+.. jupyter-execute::
 
     da = xr.tutorial.open_dataset("air_temperature")["air"]
     var = da.variable
@@ -162,7 +162,7 @@ This is because the values have not yet
 If we look at the private attribute :py:meth:`~xarray.Variable._data` containing the underlying array object, we see
 something interesting:
 
-.. ipython:: python
+.. jupyter-execute::
 
     var._data
 
@@ -171,13 +171,13 @@ but provide important functionality.
 
 Calling the public :py:attr:`~xarray.Variable.data` property loads the underlying array into memory.
 
-.. ipython:: python
+.. jupyter-execute::
 
     var.data
 
 This array is now cached, which we can see by accessing the private attribute again:
 
-.. ipython:: python
+.. jupyter-execute::
 
     var._data
 
@@ -189,14 +189,14 @@ subsequent analysis, by deferring loadin
 
 Let's open the data from disk again.
 
-.. ipython:: python
+.. jupyter-execute::
 
     da = xr.tutorial.open_dataset("air_temperature")["air"]
     var = da.variable
 
 Now, notice how even after subsetting the data has does not get loaded:
 
-.. ipython:: python
+.. jupyter-execute::
 
     var.isel(time=0)
 
@@ -204,7 +204,7 @@ The shape has changed, but the values ar
 
 Looking at the private attribute again shows how this indexing information was propagated via the hidden lazy indexing classes:
 
-.. ipython:: python
+.. jupyter-execute::
 
     var.isel(time=0)._data
 
diff -pruN 2025.03.1-8/doc/internals/interoperability.rst 2025.10.1-1/doc/internals/interoperability.rst
--- 2025.03.1-8/doc/internals/interoperability.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/internals/interoperability.rst	2025-10-10 10:38:05.000000000 +0000
@@ -10,7 +10,7 @@ This interoperability comes via a set of
 
 - :ref:`Custom file backends <add_a_backend>` via the :py:class:`~xarray.backends.BackendEntrypoint` system,
 - Numpy-like :ref:`"duck" array wrapping <internals.duckarrays>`, which supports the `Python Array API Standard <https://data-apis.org/array-api/latest/>`_,
-- :ref:`Chunked distributed array computation <internals.chunkedarrays>` via the :py:class:`~xarray.core.parallelcompat.ChunkManagerEntrypoint` system,
+- :ref:`Chunked distributed array computation <internals.chunkedarrays>` via the :py:class:`~xarray.namedarray.parallelcompat.ChunkManagerEntrypoint` system,
 - Custom :py:class:`~xarray.Index` objects for :ref:`flexible label-based lookups <internals.custom indexes>`,
 - Extending xarray objects with domain-specific methods via :ref:`custom accessors <internals.accessors>`.
 
diff -pruN 2025.03.1-8/doc/internals/time-coding.rst 2025.10.1-1/doc/internals/time-coding.rst
--- 2025.03.1-8/doc/internals/time-coding.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/internals/time-coding.rst	2025-10-10 10:38:05.000000000 +0000
@@ -1,5 +1,5 @@
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
 
     import numpy as np
     import pandas as pd
@@ -30,19 +30,22 @@ In normal operation :py:func:`pandas.to_
 
 When the arguments are numeric (not strings or ``np.datetime64`` values) ``"unit"`` can be anything from ``'Y'``, ``'W'``, ``'D'``, ``'h'``, ``'m'``, ``'s'``, ``'ms'``, ``'us'`` or ``'ns'``, though the returned resolution will be ``"ns"``.
 
-.. ipython:: python
+.. jupyter-execute::
 
-    f"Minimum datetime: {pd.to_datetime(int64_min, unit="ns")}"
-    f"Maximum datetime: {pd.to_datetime(int64_max, unit="ns")}"
+    print(f"Minimum datetime: {pd.to_datetime(int64_min, unit="ns")}")
+    print(f"Maximum datetime: {pd.to_datetime(int64_max, unit="ns")}")
 
 For input values which can't be represented in nanosecond resolution an :py:class:`pandas.OutOfBoundsDatetime` exception is raised:
 
-.. ipython:: python
+.. jupyter-execute::
 
     try:
         dtime = pd.to_datetime(int64_max, unit="us")
     except Exception as err:
         print(err)
+
+.. jupyter-execute::
+
     try:
         dtime = pd.to_datetime(uint64_max, unit="ns")
         print("Wrong:", dtime)
@@ -56,12 +59,15 @@ and :py:meth:`pandas.DatetimeIndex.as_un
 
 ``as_unit`` takes one of ``'s'``, ``'ms'``, ``'us'``, ``'ns'`` as an argument. That means we are able to represent datetimes with second, millisecond, microsecond or nanosecond resolution.
 
-.. ipython:: python
+.. jupyter-execute::
 
     time = pd.to_datetime(np.datetime64(0, "D"))
     print("Datetime:", time, np.asarray([time.to_numpy()]).dtype)
     print("Datetime as_unit('ms'):", time.as_unit("ms"))
     print("Datetime to_numpy():", time.as_unit("ms").to_numpy())
+
+.. jupyter-execute::
+
     time = pd.to_datetime(np.array([-1000, 1, 2], dtype="datetime64[Y]"))
     print("DatetimeIndex:", time)
     print("DatetimeIndex as_unit('us'):", time.as_unit("us"))
@@ -70,7 +76,7 @@ and :py:meth:`pandas.DatetimeIndex.as_un
 .. warning::
     Input data with resolution higher than ``'ns'`` (eg. ``'ps'``, ``'fs'``, ``'as'``) is truncated (not rounded) at the ``'ns'``-level. This is `currently broken <https://github.com/pandas-dev/pandas/issues/60341>`_ for the ``'ps'`` input, where it is interpreted as ``'ns'``.
 
-    .. ipython:: python
+    .. jupyter-execute::
 
         print("Good:", pd.to_datetime([np.datetime64(1901901901901, "as")]))
         print("Good:", pd.to_datetime([np.datetime64(1901901901901, "fs")]))
@@ -82,7 +88,7 @@ and :py:meth:`pandas.DatetimeIndex.as_un
 .. warning::
     Care has to be taken, as some configurations of input data will raise. The following shows, that we are safe to use :py:func:`pandas.to_datetime` when providing :py:class:`numpy.datetime64` as scalar or numpy array as input.
 
-    .. ipython:: python
+    .. jupyter-execute::
 
         print(
             "Works:",
@@ -119,18 +125,21 @@ The function :py:func:`pandas.to_timedel
 
 In normal operation :py:func:`pandas.to_timedelta` returns a :py:class:`pandas.Timedelta` (for scalar input) or :py:class:`pandas.TimedeltaIndex` (for array-like input) which are ``np.timedelta64`` values with ``ns`` resolution internally. That has the implication, that the usable timedelta covers only roughly 585 years. To accommodate for that, we are working around that limitation in the encoding and decoding step.
 
-.. ipython:: python
+.. jupyter-execute::
 
     f"Maximum timedelta range: ({pd.to_timedelta(int64_min, unit="ns")}, {pd.to_timedelta(int64_max, unit="ns")})"
 
 For input values which can't be represented in nanosecond resolution an :py:class:`pandas.OutOfBoundsTimedelta` exception is raised:
 
-.. ipython:: python
+.. jupyter-execute::
 
     try:
         delta = pd.to_timedelta(int64_max, unit="us")
     except Exception as err:
         print("First:", err)
+
+.. jupyter-execute::
+
     try:
         delta = pd.to_timedelta(uint64_max, unit="ns")
     except Exception as err:
@@ -143,12 +152,15 @@ and :py:meth:`pandas.TimedeltaIndex.as_u
 
 ``as_unit`` takes one of ``'s'``, ``'ms'``, ``'us'``, ``'ns'`` as an argument. That means we are able to represent timedeltas with second, millisecond, microsecond or nanosecond resolution.
 
-.. ipython:: python
+.. jupyter-execute::
 
     delta = pd.to_timedelta(np.timedelta64(1, "D"))
     print("Timedelta:", delta, np.asarray([delta.to_numpy()]).dtype)
     print("Timedelta as_unit('ms'):", delta.as_unit("ms"))
     print("Timedelta to_numpy():", delta.as_unit("ms").to_numpy())
+
+.. jupyter-execute::
+
     delta = pd.to_timedelta([0, 1, 2], unit="D")
     print("TimedeltaIndex:", delta)
     print("TimedeltaIndex as_unit('ms'):", delta.as_unit("ms"))
@@ -157,7 +169,7 @@ and :py:meth:`pandas.TimedeltaIndex.as_u
 .. warning::
     Care has to be taken, as some configurations of input data will raise. The following shows, that we are safe to use :py:func:`pandas.to_timedelta` when providing :py:class:`numpy.timedelta64` as scalar or numpy array as input.
 
-    .. ipython:: python
+    .. jupyter-execute::
 
         print(
             "Works:",
@@ -193,12 +205,12 @@ Timestamp
 
 When arguments are numeric (not strings) "unit" can be anything from ``'Y'``, ``'W'``, ``'D'``, ``'h'``, ``'m'``, ``'s'``, ``'ms'``, ``'us'`` or ``'ns'``, though the returned resolution will be ``"ns"``.
 
-In normal operation :py:class:`pandas.Timestamp` holds the timestamp in the provided resolution, but only one of ``'s'``, ``'ms'``, ``'us'``, ``'ns'``. Lower resolution input is automatically converted to ``'s'``, higher resolution input is cutted to ``'ns'``.
+In normal operation :py:class:`pandas.Timestamp` holds the timestamp in the provided resolution, but only one of ``'s'``, ``'ms'``, ``'us'``, ``'ns'``. Lower resolution input is automatically converted to ``'s'``, higher resolution input is truncated to ``'ns'``.
 
 The same conversion rules apply here as for :py:func:`pandas.to_timedelta` (see `to_timedelta`_).
 Depending on the internal resolution Timestamps can be represented in the range:
 
-.. ipython:: python
+.. jupyter-execute::
 
     for unit in ["s", "ms", "us", "ns"]:
         print(
@@ -210,7 +222,7 @@ Since relaxing the resolution, this enha
 .. warning::
     When initialized with a datetime string this is only defined from ``-9999-01-01`` to ``9999-12-31``.
 
-    .. ipython:: python
+    .. jupyter-execute::
 
         try:
             print("Works:", pd.Timestamp("-9999-01-01 00:00:00"))
@@ -222,7 +234,7 @@ Since relaxing the resolution, this enha
 .. note::
     :py:class:`pandas.Timestamp` is the only current possibility to correctly import time reference strings. It handles non-ISO formatted strings, keeps the resolution of the strings (``'s'``, ``'ms'`` etc.) and imports time zones. When initialized with :py:class:`numpy.datetime64` instead of a string it even overcomes the above limitation of the possible time range.
 
-    .. ipython:: python
+    .. jupyter-execute::
 
         try:
             print("Handles non-ISO:", pd.Timestamp("92-1-8 151542"))
@@ -255,7 +267,7 @@ DatetimeIndex
 :py:class:`pandas.DatetimeIndex` is used to wrap ``np.datetime64`` values or other datetime-likes when encoding. The resolution of the DatetimeIndex depends on the input, but can be only one of ``'s'``, ``'ms'``, ``'us'``, ``'ns'``. Lower resolution input is automatically converted to ``'s'``, higher resolution input is cut to ``'ns'``.
 :py:class:`pandas.DatetimeIndex` will raise :py:class:`pandas.OutOfBoundsDatetime` if the input can't be represented in the given resolution.
 
-.. ipython:: python
+.. jupyter-execute::
 
     try:
         print(
@@ -327,7 +339,7 @@ Decoding of ``values`` with a time unit
 
 5. Finally, the ``values`` (at this point converted to ``int64`` values) are cast to ``datetime64[unit]`` (using the above retrieved unit) and added to the reference time :py:class:`pandas.Timestamp`.
 
-.. ipython:: python
+.. jupyter-execute::
 
     calendar = "proleptic_gregorian"
     values = np.array([-1000 * 365, 0, 1000 * 365], dtype="int64")
@@ -336,14 +348,14 @@ Decoding of ``values`` with a time unit
     assert dt.dtype == "datetime64[us]"
     dt
 
-.. ipython:: python
+.. jupyter-execute::
 
     units = "microseconds since 2000-01-01 00:00:00"
     dt = xr.coding.times.decode_cf_datetime(values, units, calendar, time_unit="s")
     assert dt.dtype == "datetime64[us]"
     dt
 
-.. ipython:: python
+.. jupyter-execute::
 
     values = np.array([0, 0.25, 0.5, 0.75, 1.0], dtype="float64")
     units = "days since 2000-01-01 00:00:00.001"
@@ -351,7 +363,7 @@ Decoding of ``values`` with a time unit
     assert dt.dtype == "datetime64[ms]"
     dt
 
-.. ipython:: python
+.. jupyter-execute::
 
     values = np.array([0, 0.25, 0.5, 0.75, 1.0], dtype="float64")
     units = "hours since 2000-01-01"
@@ -359,7 +371,7 @@ Decoding of ``values`` with a time unit
     assert dt.dtype == "datetime64[s]"
     dt
 
-.. ipython:: python
+.. jupyter-execute::
 
     values = np.array([0, 0.25, 0.5, 0.75, 1.0], dtype="float64")
     units = "hours since 2000-01-01 00:00:00 03:30"
@@ -367,7 +379,7 @@ Decoding of ``values`` with a time unit
     assert dt.dtype == "datetime64[s]"
     dt
 
-.. ipython:: python
+.. jupyter-execute::
 
     values = np.array([-2002 * 365 - 121, -366, 365, 2000 * 365 + 119], dtype="int64")
     units = "days since 0001-01-01 00:00:00"
@@ -393,8 +405,7 @@ For encoding the process is more or less
 11. Divide ``time_deltas`` by ``delta``, use floor division (integer) or normal division (float)
 12. Return result
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     calendar = "proleptic_gregorian"
     dates = np.array(
@@ -413,9 +424,12 @@ For encoding the process is more or less
     values, _, _ = xr.coding.times.encode_cf_datetime(
         dates, units, calendar, dtype=np.dtype("int64")
     )
-    print(values)
+    print(values, units)
     np.testing.assert_array_equal(values, orig_values)
 
+.. jupyter-execute::
+    :stderr:
+
     dates = np.array(
         [
             "-2000-01-01T01:00:00",
@@ -428,11 +442,15 @@ For encoding the process is more or less
     orig_values = np.array(
         [-2002 * 365 - 121, -366, 365, 2000 * 365 + 119], dtype="int64"
     )
+    orig_values *= 24  # Convert to hours
+    orig_values[0] += 1  # Adjust for the hour offset in dates above
+
     units = "days since 0001-01-01 00:00:00"
     values, units, _ = xr.coding.times.encode_cf_datetime(
         dates, units, calendar, dtype=np.dtype("int64")
     )
     print(values, units)
+    np.testing.assert_array_equal(values, orig_values)
 
 .. _internals.default_timeunit:
 
@@ -441,17 +459,17 @@ Default Time Unit
 
 The current default time unit of xarray is ``'ns'``. When setting keyword argument ``time_unit`` unit to ``'s'`` (the lowest resolution pandas allows) datetimes will be converted to at least ``'s'``-resolution, if possible. The same holds true for ``'ms'`` and ``'us'``.
 
-.. ipython:: python
+.. jupyter-execute::
 
     attrs = {"units": "hours since 2000-01-01"}
     ds = xr.Dataset({"time": ("time", [0, 1, 2, 3], attrs)})
     ds.to_netcdf("test-datetimes1.nc")
 
-.. ipython:: python
+.. jupyter-execute::
 
     xr.open_dataset("test-datetimes1.nc")
 
-.. ipython:: python
+.. jupyter-execute::
 
     coder = xr.coders.CFDatetimeCoder(time_unit="s")
     xr.open_dataset("test-datetimes1.nc", decode_times=coder)
@@ -459,17 +477,17 @@ The current default time unit of xarray
 If a coarser unit is requested the datetimes are decoded into their native
 on-disk resolution, if possible.
 
-.. ipython:: python
+.. jupyter-execute::
 
     attrs = {"units": "milliseconds since 2000-01-01"}
     ds = xr.Dataset({"time": ("time", [0, 1, 2, 3], attrs)})
     ds.to_netcdf("test-datetimes2.nc")
 
-.. ipython:: python
+.. jupyter-execute::
 
     xr.open_dataset("test-datetimes2.nc")
 
-.. ipython:: python
+.. jupyter-execute::
 
     coder = xr.coders.CFDatetimeCoder(time_unit="s")
     xr.open_dataset("test-datetimes2.nc", decode_times=coder)
@@ -477,29 +495,28 @@ on-disk resolution, if possible.
 Similar logic applies for decoding timedelta values. The default resolution is
 ``"ns"``:
 
-.. ipython:: python
+.. jupyter-execute::
 
     attrs = {"units": "hours"}
     ds = xr.Dataset({"time": ("time", [0, 1, 2, 3], attrs)})
     ds.to_netcdf("test-timedeltas1.nc")
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
+    :stderr:
 
     xr.open_dataset("test-timedeltas1.nc")
 
 By default, timedeltas will be decoded to the same resolution as datetimes:
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     coder = xr.coders.CFDatetimeCoder(time_unit="s")
-    xr.open_dataset("test-timedeltas1.nc", decode_times=coder)
+    xr.open_dataset("test-timedeltas1.nc", decode_times=coder, decode_timedelta=True)
 
 but if one would like to decode timedeltas to a different resolution, one can
 provide a coder specifically for timedeltas to ``decode_timedelta``:
 
-.. ipython:: python
+.. jupyter-execute::
 
     timedelta_coder = xr.coders.CFTimedeltaCoder(time_unit="ms")
     xr.open_dataset(
@@ -509,29 +526,44 @@ provide a coder specifically for timedel
 As with datetimes, if a coarser unit is requested the timedeltas are decoded
 into their native on-disk resolution, if possible:
 
-.. ipython:: python
+.. jupyter-execute::
 
     attrs = {"units": "milliseconds"}
     ds = xr.Dataset({"time": ("time", [0, 1, 2, 3], attrs)})
     ds.to_netcdf("test-timedeltas2.nc")
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    xr.open_dataset("test-timedeltas2.nc")
+    xr.open_dataset("test-timedeltas2.nc", decode_timedelta=True)
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     coder = xr.coders.CFDatetimeCoder(time_unit="s")
-    xr.open_dataset("test-timedeltas2.nc", decode_times=coder)
+    xr.open_dataset("test-timedeltas2.nc", decode_times=coder, decode_timedelta=True)
 
 To opt-out of timedelta decoding (see issue `Undesired decoding to timedelta64 <https://github.com/pydata/xarray/issues/1621>`_) pass ``False`` to ``decode_timedelta``:
 
-.. ipython:: python
+.. jupyter-execute::
 
     xr.open_dataset("test-timedeltas2.nc", decode_timedelta=False)
 
 .. note::
     Note that in the future the default value of ``decode_timedelta`` will be
     ``False`` rather than ``None``.
+
+.. jupyter-execute::
+    :hide-code:
+
+    # Cleanup
+    import os
+
+    for f in [
+        "test-datetimes1.nc",
+        "test-datetimes2.nc",
+        "test-timedeltas1.nc",
+        "test-timedeltas2.nc",
+    ]:
+        if os.path.exists(f):
+            os.remove(f)
diff -pruN 2025.03.1-8/doc/internals/zarr-encoding-spec.rst 2025.10.1-1/doc/internals/zarr-encoding-spec.rst
--- 2025.03.1-8/doc/internals/zarr-encoding-spec.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/internals/zarr-encoding-spec.rst	2025-10-10 10:38:05.000000000 +0000
@@ -19,26 +19,57 @@ Xarray ``Dataset`` objects.
 
 Second, from Xarray's point of view, the key difference between
 NetCDF and Zarr is that all NetCDF arrays have *dimension names* while Zarr
-arrays do not. Therefore, in order to store NetCDF data in Zarr, Xarray must
-somehow encode and decode the name of each array's dimensions.
-
-To accomplish this, Xarray developers decided to define a special Zarr array
-attribute: ``_ARRAY_DIMENSIONS``. The value of this attribute is a list of
-dimension names (strings), for example ``["time", "lon", "lat"]``. When writing
-data to Zarr, Xarray sets this attribute on all variables based on the variable
-dimensions. When reading a Zarr group, Xarray looks for this attribute on all
-arrays, raising an error if it can't be found. The attribute is used to define
-the variable dimension names and then removed from the attributes dictionary
-returned to the user.
-
-Because of these choices, Xarray cannot read arbitrary array data, but only
-Zarr data with valid ``_ARRAY_DIMENSIONS`` or
-`NCZarr <https://docs.unidata.ucar.edu/nug/current/nczarr_head.html>`_ attributes
-on each array (NCZarr dimension names are defined in the ``.zarray`` file).
-
-After decoding the ``_ARRAY_DIMENSIONS`` or NCZarr attribute and assigning the variable
-dimensions, Xarray proceeds to [optionally] decode each variable using its
-standard CF decoding machinery used for NetCDF data (see :py:func:`decode_cf`).
+arrays do not. In Zarr v2, Xarray uses an ad-hoc convention to encode and decode
+the name of each array's dimensions. However, starting with Zarr v3, the
+``dimension_names`` metadata field provides a formal convention for storing the
+NetCDF data model in Zarr.
+
+Dimension Encoding in Zarr Formats
+-----------------------------------
+
+Xarray encodes array dimensions differently depending on the Zarr format version:
+
+**Zarr V2 Format:**
+Xarray uses a special Zarr array attribute: ``_ARRAY_DIMENSIONS``. The value of this
+attribute is a list of dimension names (strings), for example ``["time", "lon", "lat"]``.
+When writing data to Zarr V2, Xarray sets this attribute on all variables based on the
+variable dimensions. This attribute is visible when accessing arrays directly with
+zarr-python.
+
+**Zarr V3 Format:**
+Xarray uses the native ``dimension_names`` field in the array metadata. This is part
+of the official Zarr V3 specification and is not stored as a regular attribute.
+When accessing arrays with zarr-python, this information is available in the array's
+metadata but not in the attributes dictionary.
+
+When reading a Zarr group, Xarray looks for dimension information in the appropriate
+location based on the format version, raising an error if it can't be found. The
+dimension information is used to define the variable dimension names and then
+(for Zarr V2) removed from the attributes dictionary returned to the user.
+
+CF Conventions
+--------------
+
+Xarray uses its standard CF encoding/decoding functionality for handling metadata
+(see :py:func:`decode_cf`). This includes encoding concepts such as dimensions and
+coordinates. The ``coordinates`` attribute, which lists coordinate variables
+(e.g., ``"yc xc"`` for spatial coordinates), is one part of the broader CF conventions
+used to describe metadata in NetCDF and Zarr.
+
+Compatibility and Reading
+-------------------------
+
+Because of these encoding choices, Xarray cannot read arbitrary Zarr arrays, but only
+Zarr data with valid dimension metadata. Xarray supports:
+
+- Zarr V2 arrays with ``_ARRAY_DIMENSIONS`` attributes
+- Zarr V3 arrays with ``dimension_names`` metadata
+- `NCZarr <https://docs.unidata.ucar.edu/nug/current/nczarr_head.html>`_ format
+  (dimension names are defined in the ``.zarray`` file)
+
+After decoding the dimension information and assigning the variable dimensions,
+Xarray proceeds to (optionally) decode each variable using the same standard CF
+decoding machinery used for NetCDF data.
 
 Finally, it's worth noting that Xarray writes (and attempts to read)
 "consolidated metadata" by default (the ``.zmetadata`` file), which is another
@@ -49,27 +80,107 @@ warning about poor performance when read
 explicitly set ``consolidated=False``. See :ref:`io.zarr.consolidated_metadata`
 for more details.
 
-As a concrete example, here we write a tutorial dataset to Zarr and then
-re-open it directly with Zarr:
+Examples: Zarr Format Differences
+----------------------------------
+
+The following examples demonstrate how dimension and coordinate encoding differs
+between Zarr format versions. We'll use the same tutorial dataset but write it
+in different formats to show what users will see when accessing the files directly
+with zarr-python.
+
+**Example 1: Zarr V2 Format**
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     import os
     import xarray as xr
     import zarr
 
+    # Load tutorial dataset and write as Zarr V2
     ds = xr.tutorial.load_dataset("rasm")
-    ds.to_zarr("rasm.zarr", mode="w")
+    ds.to_zarr("rasm_v2.zarr", mode="w", consolidated=False, zarr_format=2)
+
+    # Open with zarr-python and examine attributes
+    zgroup = zarr.open("rasm_v2.zarr")
+    print("Zarr V2 - Tair attributes:")
+    tair_attrs = dict(zgroup["Tair"].attrs)
+    for key, value in tair_attrs.items():
+        print(f"  '{key}': {repr(value)}")
+
+.. jupyter-execute::
+    :hide-code:
+
+    import shutil
+    shutil.rmtree("rasm_v2.zarr")
+
+**Example 2: Zarr V3 Format**
+
+.. jupyter-execute::
+
+    # Write the same dataset as Zarr V3
+    ds.to_zarr("rasm_v3.zarr", mode="w", consolidated=False, zarr_format=3)
+
+    # Open with zarr-python and examine attributes
+    zgroup = zarr.open("rasm_v3.zarr")
+    print("Zarr V3 - Tair attributes:")
+    tair_attrs = dict(zgroup["Tair"].attrs)
+    for key, value in tair_attrs.items():
+        print(f"  '{key}': {repr(value)}")
+
+    # For Zarr V3, dimension information is in metadata
+    tair_array = zgroup["Tair"]
+    print(f"\nZarr V3 - dimension_names in metadata: {tair_array.metadata.dimension_names}")
+
+.. jupyter-execute::
+    :hide-code:
+
+    import shutil
+    shutil.rmtree("rasm_v3.zarr")
+
+
+Chunk Key Encoding
+------------------
+
+When writing data to Zarr stores, Xarray supports customizing how chunk keys are encoded
+through the ``chunk_key_encoding`` parameter in the variable's encoding dictionary. This
+is particularly useful when working with Zarr V2 arrays where you need to control the
+dimension separator in chunk keys.
+
+For example, to specify a custom separator for chunk keys:
+
+.. jupyter-execute::
+
+    import xarray as xr
+    import numpy as np
+    from zarr.core.chunk_key_encodings import V2ChunkKeyEncoding
+
+    # Create a custom chunk key encoding with "/" as separator
+    enc = V2ChunkKeyEncoding(separator="/").to_dict()
 
-    zgroup = zarr.open("rasm.zarr")
-    print(os.listdir("rasm.zarr"))
-    print(zgroup.tree())
-    dict(zgroup["Tair"].attrs)
+    # Create and write a dataset with custom chunk key encoding
+    arr = np.ones((42, 100))
+    ds = xr.DataArray(arr, name="var1").to_dataset()
+    ds.to_zarr(
+        "example.zarr",
+        zarr_format=2,
+        mode="w",
+        encoding={"var1": {"chunks": (42, 50), "chunk_key_encoding": enc}},
+    )
+
+The ``chunk_key_encoding`` option accepts a dictionary that specifies the encoding
+configuration. For Zarr V2 arrays, you can use the ``V2ChunkKeyEncoding`` class from
+``zarr.core.chunk_key_encodings`` to generate this configuration. This is useful when
+you need to ensure compatibility with a specific Zarr V2 storage layout or with tools
+that expect a particular chunk key format.
+
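+To see the effect, you can list what was written for ``var1``: with the ``"/"``
+separator the chunk keys are nested (e.g. ``var1/0/0``) instead of flat
+``var1/0.0``-style keys (a quick illustrative check; the exact listing may vary with
+your zarr version):
+
+.. jupyter-execute::
+
+    import os
+
+    sorted(os.listdir("example.zarr/var1"))
+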
+.. note::
+    The ``chunk_key_encoding`` option is only relevant when writing to Zarr stores.
+    When reading Zarr arrays, Xarray automatically detects and uses the appropriate
+    chunk key encoding based on the store's format and configuration.
 
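+As a quick check, the store written above opens again without any special arguments:
+
+.. jupyter-execute::
+
+    xr.open_zarr("example.zarr")
+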
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
 
     import shutil
 
-    shutil.rmtree("rasm.zarr")
+    shutil.rmtree("example.zarr")
diff -pruN 2025.03.1-8/doc/tutorials-and-videos.rst 2025.10.1-1/doc/tutorials-and-videos.rst
--- 2025.03.1-8/doc/tutorials-and-videos.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/tutorials-and-videos.rst	1970-01-01 00:00:00.000000000 +0000
@@ -1,32 +0,0 @@
-
-Tutorials and Videos
-====================
-
-
-Tutorials
-----------
-
-- `Xarray's Tutorials`_ repository
-- The `UW eScience Institute's Geohackweek`_ tutorial on xarray for geospatial data scientists.
-- `Nicolas Fauchereau's 2015 tutorial`_ on xarray for netCDF users.
-
-
-
-Videos
--------
-
-.. include:: videos-gallery.txt
-
-
-Books, Chapters and Articles
------------------------------
-
-- Stephan Hoyer and Joe Hamman's `Journal of Open Research Software paper`_ describing the xarray project.
-
-
-.. _Xarray's Tutorials: https://xarray-contrib.github.io/xarray-tutorial/
-.. _Journal of Open Research Software paper: https://doi.org/10.5334/jors.148
-.. _UW eScience Institute's Geohackweek : https://geohackweek.github.io/nDarrays/
-.. _tutorial: https://github.com/Unidata/unidata-users-workshop/blob/master/notebooks/xray-tutorial.ipynb
-.. _with answers: https://github.com/Unidata/unidata-users-workshop/blob/master/notebooks/xray-tutorial-with-answers.ipynb
-.. _Nicolas Fauchereau's 2015 tutorial: https://nbviewer.iPython.org/github/nicolasfauchereau/metocean/blob/master/notebooks/xray.ipynb
diff -pruN 2025.03.1-8/doc/user-guide/combining.rst 2025.10.1-1/doc/user-guide/combining.rst
--- 2025.03.1-8/doc/user-guide/combining.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/user-guide/combining.rst	2025-10-10 10:38:05.000000000 +0000
@@ -3,8 +3,9 @@
 Combining data
 --------------
 
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
+    :hide-output:
 
     import numpy as np
     import pandas as pd
@@ -12,6 +13,8 @@ Combining data
 
     np.random.seed(123456)
 
+    %xmode minimal
+
 * For combining datasets or data arrays along a single dimension, see concatenate_.
 * For combining datasets with different variables, see merge_.
 * For combining datasets or data arrays with different indexes or missing values, see combine_.
@@ -22,49 +25,61 @@ Combining data
 Concatenate
 ~~~~~~~~~~~
 
-To combine :py:class:`~xarray.Dataset`s / :py:class:`~xarray.DataArray`s along an existing or new dimension
+To combine :py:class:`~xarray.Dataset` / :py:class:`~xarray.DataArray` objects along an existing or new dimension
 into a larger object, you can use :py:func:`~xarray.concat`. ``concat``
 takes an iterable of ``DataArray`` or ``Dataset`` objects, as well as a
 dimension name, and concatenates along that dimension:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da = xr.DataArray(
         np.arange(6).reshape(2, 3), [("x", ["a", "b"]), ("y", [10, 20, 30])]
     )
     da.isel(y=slice(0, 1))  # same as da[:, :1]
+
+.. jupyter-execute::
+
     # This resembles how you would use np.concatenate:
     xr.concat([da[:, :1], da[:, 1:]], dim="y")
+
+.. jupyter-execute::
+
     # For more friendly pandas-like indexing you can use:
     xr.concat([da.isel(y=slice(0, 1)), da.isel(y=slice(1, None))], dim="y")
 
 In addition to combining along an existing dimension, ``concat`` can create a
 new dimension by stacking lower dimensional arrays together:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da.sel(x="a")
+
+.. jupyter-execute::
+
     xr.concat([da.isel(x=0), da.isel(x=1)], "x")
 
 If the second argument to ``concat`` is a new dimension name, the arrays will
 be concatenated along that new dimension, which is always inserted as the first
 dimension:
 
-.. ipython:: python
+.. jupyter-execute::
 
-    xr.concat([da.isel(x=0), da.isel(x=1)], "new_dim")
+    da0 = da.isel(x=0, drop=True)
+    da1 = da.isel(x=1, drop=True)
+
+    xr.concat([da0, da1], "new_dim")
 
 The second argument to ``concat`` can also be an :py:class:`~pandas.Index` or
 :py:class:`~xarray.DataArray` object as well as a string, in which case it is
 used to label the values along the new dimension:
 
-.. ipython:: python
+.. jupyter-execute::
 
-    xr.concat([da.isel(x=0), da.isel(x=1)], pd.Index([-90, -100], name="new_dim"))
+    xr.concat([da0, da1], pd.Index([-90, -100], name="new_dim"))
 
 Of course, ``concat`` also works on ``Dataset`` objects:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds = da.to_dataset(name="foo")
     xr.concat([ds.sel(x="a"), ds.sel(x="b")], "x")
@@ -75,6 +90,12 @@ between datasets. With the default param
 variables into memory to compare them between datasets. This may be prohibitively
 expensive if you are manipulating your dataset lazily using :ref:`dask`.
 
+.. note::
+
+   In a future version of xarray the default values for many of these options
+   will change. You can opt into the new default values early using
+   ``xr.set_options(use_new_combine_kwarg_defaults=True)``.
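+
+   For example, to opt in only within a limited scope (an illustrative sketch):
+
+   .. jupyter-input::
+
+       with xr.set_options(use_new_combine_kwarg_defaults=True):
+           combined = xr.concat([ds.sel(x="a"), ds.sel(x="b")], dim="x")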
+
 .. _merge:
 
 Merge
@@ -85,39 +106,54 @@ To combine variables and coordinates bet
 ``Dataset``, ``DataArray`` or dictionaries of objects convertible to
 ``DataArray`` objects:
 
-.. ipython:: python
+.. jupyter-execute::
 
     xr.merge([ds, ds.rename({"foo": "bar"})])
+
+.. jupyter-execute::
+
     xr.merge([xr.DataArray(n, name="var%d" % n) for n in range(5)])
 
 If you merge another dataset (or a dictionary including data array objects), by
 default the resulting dataset will be aligned on the **union** of all index
 coordinates:
 
-.. ipython:: python
+.. note::
+
+   In a future version of xarray the default value for ``join`` and ``compat``
+   will change. This change will mean that xarray will no longer attempt
+   to align the indices of the merged dataset. You can opt into the new default
+   values early using ``xr.set_options(use_new_combine_kwarg_defaults=True)``.
+   Alternatively, explicitly set ``join='outer'`` to preserve the old behavior.
+
+.. jupyter-execute::
 
     other = xr.Dataset({"bar": ("x", [1, 2, 3, 4]), "x": list("abcd")})
-    xr.merge([ds, other])
+    xr.merge([ds, other], join="outer")
 
 This ensures that ``merge`` is non-destructive. ``xarray.MergeError`` is raised
 if you attempt to merge two variables with the same name but different values:
 
-.. ipython::
+.. jupyter-execute::
+    :raises:
+
+    xr.merge([ds, ds + 1])
+
+
+.. note::
+
+    In a future version of xarray the default value for ``compat`` will change
+    from ``compat='no_conflicts'`` to ``compat='override'``. In this scenario
+    the values in the first object override all the values in other objects.
+
+    .. jupyter-execute::
 
-    @verbatim
-    In [1]: xr.merge([ds, ds + 1])
-    MergeError: conflicting values for variable 'foo' on objects to be combined:
-    first value: <xarray.Variable (x: 2, y: 3)>
-    array([[ 0.4691123 , -0.28286334, -1.5090585 ],
-           [-1.13563237,  1.21211203, -0.17321465]])
-    second value: <xarray.Variable (x: 2, y: 3)>
-    array([[ 1.4691123 ,  0.71713666, -0.5090585 ],
-           [-0.13563237,  2.21211203,  0.82678535]])
+        xr.merge([ds, ds + 1], compat="override")
 
 The same non-destructive merging between ``DataArray`` index coordinates is
 used in the :py:class:`~xarray.Dataset` constructor:
 
-.. ipython:: python
+.. jupyter-execute::
 
     xr.Dataset({"a": da.isel(x=slice(0, 1)), "b": da.isel(x=slice(1, 2))})
 
@@ -132,11 +168,14 @@ using values from the called object to f
 are the union of coordinate labels. Vacant cells as a result of the outer-join
 are filled with ``NaN``. For example:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ar0 = xr.DataArray([[0, 0], [0, 0]], [("x", ["a", "b"]), ("y", [-1, 0])])
     ar1 = xr.DataArray([[1, 1], [1, 1]], [("x", ["b", "c"]), ("y", [0, 1])])
     ar0.combine_first(ar1)
+
+.. jupyter-execute::
+
     ar1.combine_first(ar0)
 
 For datasets, ``ds0.combine_first(ds1)`` works similarly to
@@ -144,6 +183,11 @@ For datasets, ``ds0.combine_first(ds1)``
 there are conflicting values in variables to be merged, whereas
 ``.combine_first`` defaults to the calling object's values.
 
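+For example (a minimal illustrative sketch with two small made-up datasets):
+
+.. jupyter-execute::
+
+    dsa = xr.Dataset({"a": ("x", [1.0, 2.0])}, {"x": [0, 1]})
+    dsb = xr.Dataset({"a": ("x", [9.0, 3.0])}, {"x": [1, 2]})
+    # the conflicting value at x=1 resolves to the calling object's value (2.0)
+    dsa.combine_first(dsb)
+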
+.. note::
+
+   In a future version of xarray the default options for ``xr.merge`` will change
+   such that the behavior matches ``combine_first``.
+
 .. _update:
 
 Update
@@ -153,7 +197,7 @@ In contrast to ``merge``, :py:meth:`~xar
 in-place without checking for conflicts, and will overwrite any existing
 variables with new values:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.update({"space": ("space", [10.2, 9.4, 3.9])})
 
@@ -164,14 +208,14 @@ replace all dataset variables that use i
 ``update`` also performs automatic alignment if necessary. Unlike ``merge``, it
 maintains the alignment of the original array instead of merging indexes:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.update(other)
 
 The exact same alignment logic applies when setting a variable with ``__setitem__``
 syntax:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds["baz"] = xr.DataArray([9, 9, 9, 9, 9], coords=[("x", list("abcde"))])
     ds.baz
@@ -187,14 +231,14 @@ the optional ``compat`` argument on ``co
 :py:attr:`~xarray.Dataset.equals` checks dimension names, indexes and array
 values:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da.equals(da.copy())
 
 :py:attr:`~xarray.Dataset.identical` also checks attributes, and the name of each
 object:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da.identical(da.rename("bar"))
 
@@ -202,7 +246,7 @@ object:
 check that allows variables to have different dimensions, as long as values
 are constant along those new dimensions:
 
-.. ipython:: python
+.. jupyter-execute::
 
     left = xr.Dataset(coords={"x": 0})
     right = xr.Dataset({"x": [0, 0, 0]})
@@ -214,7 +258,7 @@ missing values marked by ``NaN`` in the
 In contrast, the ``==`` operation performs element-wise comparison (like
 numpy):
 
-.. ipython:: python
+.. jupyter-execute::
 
     da == da.copy()
 
@@ -232,11 +276,11 @@ methods it allows the merging of xarray
 have ``NaN`` values. This can be used to combine data with overlapping
 coordinates as long as any non-missing values agree or are disjoint:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds1 = xr.Dataset({"a": ("x", [10, 20, 30, np.nan])}, {"x": [1, 2, 3, 4]})
     ds2 = xr.Dataset({"a": ("x", [np.nan, 30, 40, 50])}, {"x": [2, 3, 4, 5]})
-    xr.merge([ds1, ds2], compat="no_conflicts")
+    xr.merge([ds1, ds2], join="outer", compat="no_conflicts")
 
 Note that due to the underlying representation of missing values as floating
 point numbers (``NaN``), variable data type is not always preserved when merging
@@ -264,12 +308,15 @@ each processor wrote out data to a separ
 into 4 parts, 2 each along both the x and y axes, requires organising the
 datasets into a doubly-nested list, e.g.:
 
-.. ipython:: python
+.. jupyter-execute::
 
     arr = xr.DataArray(
         name="temperature", data=np.random.randint(5, size=(2, 2)), dims=["x", "y"]
     )
     arr
+
+.. jupyter-execute::
+
     ds_grid = [[arr, arr], [arr, arr]]
     xr.combine_nested(ds_grid, concat_dim=["x", "y"])
 
@@ -279,7 +326,7 @@ along two times, and contain two differe
 to ``'concat_dim'`` to specify the dimension of the nested list over which
 we wish to use ``merge`` instead of ``concat``:
 
-.. ipython:: python
+.. jupyter-execute::
 
     temp = xr.DataArray(name="temperature", data=np.random.randn(2), dims=["t"])
     precip = xr.DataArray(name="precipitation", data=np.random.randn(2), dims=["t"])
@@ -294,14 +341,13 @@ Here we combine two datasets using their
 they are concatenated in order based on the values in their dimension
 coordinates, not on their position in the list passed to ``combine_by_coords``.
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     x1 = xr.DataArray(name="foo", data=np.random.randn(3), coords=[("x", [0, 1, 2])])
     x2 = xr.DataArray(name="foo", data=np.random.randn(3), coords=[("x", [3, 4, 5])])
     xr.combine_by_coords([x2, x1])
 
-These functions can be used by :py:func:`~xarray.open_mfdataset` to open many
+These functions are used by :py:func:`~xarray.open_mfdataset` to open many
 files as one dataset. The particular function used is specified by setting the
 argument ``'combine'`` to ``'by_coords'`` or ``'nested'``. This is useful for
 situations where your data is split across many files in multiple locations,
diff -pruN 2025.03.1-8/doc/user-guide/complex-numbers.rst 2025.10.1-1/doc/user-guide/complex-numbers.rst
--- 2025.03.1-8/doc/user-guide/complex-numbers.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/user-guide/complex-numbers.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,128 @@
+.. currentmodule:: xarray
+
+.. _complex:
+
+Complex Numbers
+===============
+
+.. jupyter-execute::
+    :hide-code:
+
+    import numpy as np
+    import xarray as xr
+
+Xarray leverages NumPy to seamlessly handle complex numbers in :py:class:`~xarray.DataArray` and :py:class:`~xarray.Dataset` objects.
+
+In the examples below, we are using a DataArray named ``da`` with complex elements (of :math:`\mathbb{C}`):
+
+.. jupyter-execute::
+
+    data = np.array([[1 + 2j, 3 + 4j], [5 + 6j, 7 + 8j]])
+    da = xr.DataArray(
+        data,
+        dims=["x", "y"],
+        coords={"x": ["a", "b"], "y": [1, 2]},
+        name="complex_nums",
+    )
+
+
+Operations on Complex Data
+--------------------------
+You can access real and imaginary components using the ``.real`` and ``.imag`` attributes. Most NumPy universal functions (ufuncs) like :py:doc:`numpy.abs <numpy:reference/generated/numpy.absolute>` or :py:doc:`numpy.angle <numpy:reference/generated/numpy.angle>` work directly.
+
+.. jupyter-execute::
+
+    da.real
+
+.. jupyter-execute::
+
+    np.abs(da)
+
+.. note::
+    Like NumPy, ``.real`` and ``.imag`` typically return *views*, not copies, of the original data.
+
+
+Reading and Writing Complex Data
+--------------------------------
+
+Writing complex data to NetCDF files (see :ref:`io.netcdf`) is supported via :py:meth:`~xarray.DataArray.to_netcdf` using specific backend engines that handle complex types:
+
+
+.. tab:: h5netcdf
+
+   This requires the `h5netcdf <https://h5netcdf.org>`_ library to be installed.
+
+   .. jupyter-execute::
+
+       # write the data to disk
+       da.to_netcdf("complex_nums_h5.nc", engine="h5netcdf")
+       # read the file back into memory
+       ds_h5 = xr.open_dataset("complex_nums_h5.nc", engine="h5netcdf")
+       # check the dtype
+       ds_h5[da.name].dtype
+
+
+.. tab:: netcdf4
+
+   This requires the `netcdf4-python (>= 1.7.1) <https://github.com/Unidata/netcdf4-python>`_ library, and you must enable ``auto_complex=True``.
+
+   .. jupyter-execute::
+
+       # write the data to disk
+       da.to_netcdf("complex_nums_nc4.nc", engine="netcdf4", auto_complex=True)
+       # read the file back into memory
+       ds_nc4 = xr.open_dataset(
+           "complex_nums_nc4.nc", engine="netcdf4", auto_complex=True
+       )
+       # check the dtype
+       ds_nc4[da.name].dtype
+
+
+.. warning::
+   The ``scipy`` engine only supports NetCDF V3 and does *not* support complex arrays; writing with ``engine="scipy"`` raises a ``TypeError``.
+
+
+Alternative: Manual Handling
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If direct writing is not supported (e.g., targeting NetCDF3), you can manually
+split the complex array into separate real and imaginary variables before saving:
+
+.. jupyter-execute::
+
+    # Write data to file
+    ds_manual = xr.Dataset(
+        {
+            f"{da.name}_real": da.real,
+            f"{da.name}_imag": da.imag,
+        }
+    )
+    ds_manual.to_netcdf("complex_manual.nc", engine="scipy")  # Example
+
+    # Read data from file
+    ds = xr.open_dataset("complex_manual.nc", engine="scipy")
+    reconstructed = ds[f"{da.name}_real"] + 1j * ds[f"{da.name}_imag"]
+
+Recommendations
+^^^^^^^^^^^^^^^
+
+- Use ``engine="netcdf4"`` with ``auto_complex=True`` for full compliance and ease.
+- Use ``h5netcdf`` for HDF5-based storage when interoperability with HDF5 is desired.
+- For maximum legacy support (NetCDF3), manually handle real/imaginary components.
+
+.. jupyter-execute::
+    :hide-code:
+
+    # Cleanup
+    import os
+
+    for f in ["complex_nums_nc4.nc", "complex_nums_h5.nc", "complex_manual.nc"]:
+        if os.path.exists(f):
+            os.remove(f)
+
+
+
+See also
+--------
+- :ref:`io.netcdf` — full NetCDF I/O guide
+- `NumPy complex numbers <https://numpy.org/doc/stable/user/basics.types.html#complex>`__
diff -pruN 2025.03.1-8/doc/user-guide/computation.rst 2025.10.1-1/doc/user-guide/computation.rst
--- 2025.03.1-8/doc/user-guide/computation.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/user-guide/computation.rst	2025-10-10 10:38:05.000000000 +0000
@@ -1,6 +1,6 @@
 .. currentmodule:: xarray
 
-.. _comput:
+.. _compute:
 
 ###########
 Computation
@@ -18,8 +18,9 @@ Basic array math
 Arithmetic operations with a single DataArray automatically vectorize (like
 numpy) over all array values:
 
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
+    :hide-output:
 
     import numpy as np
     import pandas as pd
@@ -27,13 +28,18 @@ numpy) over all array values:
 
     np.random.seed(123456)
 
-.. ipython:: python
+    %xmode minimal
+
+.. jupyter-execute::
 
     arr = xr.DataArray(
         np.random.default_rng(0).random((2, 3)),
         [("x", ["a", "b"]), ("y", [10, 20, 30])],
     )
     arr - 3
+
+.. jupyter-execute::
+
     abs(arr)
 
 You can also use any of numpy's or scipy's many `ufunc`__ functions directly on
@@ -41,31 +47,39 @@ a DataArray:
 
 __ https://numpy.org/doc/stable/reference/ufuncs.html
 
-.. ipython:: python
+.. jupyter-execute::
 
     np.sin(arr)
 
 Use :py:func:`~xarray.where` to conditionally switch between values:
 
-.. ipython:: python
+.. jupyter-execute::
 
     xr.where(arr > 0, "positive", "negative")
 
 Use ``@`` to compute the :py:func:`~xarray.dot` product:
 
-.. ipython:: python
+.. jupyter-execute::
 
     arr @ arr
 
 Data arrays also implement many :py:class:`numpy.ndarray` methods:
 
-.. ipython:: python
+.. jupyter-execute::
 
     arr.round(2)
+
+.. jupyter-execute::
+
     arr.T
 
+.. jupyter-execute::
+
     intarr = xr.DataArray([0, 1, 2, 3, 4, 5])
     intarr << 2  # only supported for int types
+
+.. jupyter-execute::
+
     intarr >> 1
 
 .. _missing_values:
@@ -87,7 +101,7 @@ methods for working with missing data fr
 It returns a new xarray object with the same dimensions as the original object, but with boolean values
 indicating where **missing values** are present.
 
-.. ipython:: python
+.. jupyter-execute::
 
     x = xr.DataArray([0, 1, np.nan, np.nan, 2], dims=["x"])
     x.isnull()
@@ -99,7 +113,7 @@ object has 'True' values in the third an
 object. It returns a new xarray object with the same dimensions as the original object, but with boolean
 values indicating where **non-missing values** are present.
 
-.. ipython:: python
+.. jupyter-execute::
 
     x = xr.DataArray([0, 1, np.nan, np.nan, 2], dims=["x"])
     x.notnull()
@@ -113,7 +127,7 @@ non-missing values along one or more dim
 the same dimensions as the original object, but with each element replaced by the count of non-missing
 values along the specified dimensions.
 
-.. ipython:: python
+.. jupyter-execute::
 
     x = xr.DataArray([0, 1, np.nan, np.nan, 2], dims=["x"])
     x.count()
@@ -126,7 +140,7 @@ the number of non-null elements in x.
 It returns a new xarray object with the same dimensions as the original object, but with missing values
 removed.
 
-.. ipython:: python
+.. jupyter-execute::
 
     x = xr.DataArray([0, 1, np.nan, np.nan, 2], dims=["x"])
     x.dropna(dim="x")
@@ -138,7 +152,7 @@ original order.
 :py:meth:`~xarray.DataArray.fillna` is a method in xarray that can be used to fill missing or null values in an xarray object with a
 specified value or method. It returns a new xarray object with the same dimensions as the original object, but with missing values filled.
 
-.. ipython:: python
+.. jupyter-execute::
 
     x = xr.DataArray([0, 1, np.nan, np.nan, 2], dims=["x"])
     x.fillna(-1)
@@ -151,7 +165,7 @@ returns a new :py:class:`~xarray.DataArr
 xarray object along one or more dimensions. It returns a new xarray object with the same dimensions as the
 original object, but with missing values replaced by the last non-missing value along the specified dimensions.
 
-.. ipython:: python
+.. jupyter-execute::
 
     x = xr.DataArray([0, 1, np.nan, np.nan, 2], dims=["x"])
     x.ffill("x")
@@ -164,7 +178,7 @@ five elements, containing the values [0,
 xarray object along one or more dimensions. It returns a new xarray object with the same dimensions as the original object, but
 with missing values replaced by the next non-missing value along the specified dimensions.
 
-.. ipython:: python
+.. jupyter-execute::
 
     x = xr.DataArray([0, 1, np.nan, np.nan, 2], dims=["x"])
     x.bfill("x")
@@ -180,7 +194,7 @@ Xarray objects also have an :py:meth:`~x
 for filling missing values via 1D interpolation. It returns a new xarray object with the same dimensions
 as the original object, but with missing values interpolated.
 
-.. ipython:: python
+.. jupyter-execute::
 
     x = xr.DataArray(
         [0, 1, np.nan, np.nan, 2],
@@ -212,10 +226,16 @@ Aggregation methods have been updated to
 ``axis``. This allows for very intuitive syntax for aggregation methods that are
 applied along particular dimension(s):
 
-.. ipython:: python
+.. jupyter-execute::
 
     arr.sum(dim="x")
+
+.. jupyter-execute::
+
     arr.std(["x", "y"])
+
+.. jupyter-execute::
+
     arr.min()
 
 
@@ -223,20 +243,20 @@ If you need to figure out the axis numbe
 for wrapping code designed to work with numpy arrays), you can use the
 :py:meth:`~xarray.DataArray.get_axis_num` method:
 
-.. ipython:: python
+.. jupyter-execute::
 
     arr.get_axis_num("y")
 
 These operations automatically skip missing values, like in pandas:
 
-.. ipython:: python
+.. jupyter-execute::
 
     xr.DataArray([1, 2, np.nan, 3]).mean()
 
 If desired, you can disable this behavior by invoking the aggregation method
 with ``skipna=False``.
 
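+For example, the same array as above then returns ``nan``:
+
+.. jupyter-execute::
+
+    xr.DataArray([1, 2, np.nan, 3]).mean(skipna=False)
+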
-.. _comput.rolling:
+.. _compute.rolling:
 
 Rolling window operations
 =========================
@@ -244,7 +264,7 @@ Rolling window operations
 ``DataArray`` objects include a :py:meth:`~xarray.DataArray.rolling` method. This
 method supports rolling window aggregation:
 
-.. ipython:: python
+.. jupyter-execute::
 
     arr = xr.DataArray(np.arange(0, 7.5, 0.5).reshape(3, 5), dims=("x", "y"))
     arr
@@ -253,24 +273,27 @@ method supports rolling window aggregati
 name of the dimension as a key (e.g. ``y``) and the window size as the value
 (e.g. ``3``).  We get back a ``Rolling`` object:
 
-.. ipython:: python
+.. jupyter-execute::
 
     arr.rolling(y=3)
 
 Aggregation and summary methods can be applied directly to the ``Rolling``
 object:
 
-.. ipython:: python
+.. jupyter-execute::
 
     r = arr.rolling(y=3)
     r.reduce(np.std)
+
+.. jupyter-execute::
+
     r.mean()
 
 Aggregation results are assigned the coordinate at the end of each window by
 default, but can be centered by passing ``center=True`` when constructing the
 ``Rolling`` object:
 
-.. ipython:: python
+.. jupyter-execute::
 
     r = arr.rolling(y=3, center=True)
     r.mean()
@@ -280,16 +303,19 @@ array produce ``nan``\s.  Setting ``min_
 changes the minimum number of observations within the window required to have
 a value when aggregating:
 
-.. ipython:: python
+.. jupyter-execute::
 
     r = arr.rolling(y=3, min_periods=2)
     r.mean()
+
+.. jupyter-execute::
+
     r = arr.rolling(y=3, center=True, min_periods=2)
     r.mean()
 
 From version 0.17, xarray supports multidimensional rolling,
 
-.. ipython:: python
+.. jupyter-execute::
 
     r = arr.rolling(x=2, y=3, min_periods=2)
     r.mean()
@@ -308,7 +334,7 @@ We can also manually iterate through ``R
         # arr_window is a view of x
         ...
 
-.. _comput.rolling_exp:
+.. _compute.rolling_exp:
 
 While ``rolling`` provides a simple moving average, ``DataArray`` also supports
 an exponential moving average with :py:meth:`~xarray.DataArray.rolling_exp`.
@@ -330,18 +356,21 @@ the last position.
 You can use this for more advanced rolling operations such as strided rolling,
 windowed rolling, convolution, short-time FFT, etc.
 
-.. ipython:: python
+.. jupyter-execute::
 
     # rolling with 2-point stride
     rolling_da = r.construct(x="x_win", y="y_win", stride=2)
     rolling_da
+
+.. jupyter-execute::
+
     rolling_da.mean(["x_win", "y_win"], skipna=False)
 
 Because the ``DataArray`` given by ``r.construct('window_dim')`` is a view
 of the original array, it is memory efficient.
 You can also use ``construct`` to compute a weighted rolling sum:
 
-.. ipython:: python
+.. jupyter-execute::
 
     weight = xr.DataArray([0.25, 0.5, 0.25], dims=["window"])
     arr.rolling(y=3).construct(y="window").dot(weight)
@@ -354,7 +383,7 @@ You can also use ``construct`` to comput
   To avoid this, use ``skipna=False`` as in the above example.
 
 
-.. _comput.weighted:
+.. _compute.weighted:
 
 Weighted array reductions
 =========================
@@ -363,7 +392,7 @@ Weighted array reductions
 and :py:meth:`Dataset.weighted` array reduction methods. They currently
 support weighted ``sum``, ``mean``, ``std``, ``var`` and ``quantile``.
 
-.. ipython:: python
+.. jupyter-execute::
 
     coords = dict(month=("month", [1, 2, 3]))
 
@@ -372,60 +401,60 @@ support weighted ``sum``, ``mean``, ``st
 
 Create a weighted object:
 
-.. ipython:: python
+.. jupyter-execute::
 
     weighted_prec = prec.weighted(weights)
     weighted_prec
 
 Calculate the weighted sum:
 
-.. ipython:: python
+.. jupyter-execute::
 
     weighted_prec.sum()
 
 Calculate the weighted mean:
 
-.. ipython:: python
+.. jupyter-execute::
 
     weighted_prec.mean(dim="month")
 
 Calculate the weighted quantile:
 
-.. ipython:: python
+.. jupyter-execute::
 
     weighted_prec.quantile(q=0.5, dim="month")
 
 The weighted sum corresponds to:
 
-.. ipython:: python
+.. jupyter-execute::
 
     weighted_sum = (prec * weights).sum()
     weighted_sum
 
 the weighted mean to:
 
-.. ipython:: python
+.. jupyter-execute::
 
     weighted_mean = weighted_sum / weights.sum()
     weighted_mean
 
 the weighted variance to:
 
-.. ipython:: python
+.. jupyter-execute::
 
     weighted_var = weighted_prec.sum_of_squares() / weights.sum()
     weighted_var
 
 and the weighted standard deviation to:
 
-.. ipython:: python
+.. jupyter-execute::
 
     weighted_std = np.sqrt(weighted_var)
     weighted_std
 
 However, the functions also take missing values in the data into account:
 
-.. ipython:: python
+.. jupyter-execute::
 
     data = xr.DataArray([np.nan, 2, 4])
     weights = xr.DataArray([8, 1, 1])
@@ -438,7 +467,7 @@ in 0.6.
 
 If the weights add up to 0, ``sum`` returns 0:
 
-.. ipython:: python
+.. jupyter-execute::
 
     data = xr.DataArray([1.0, 1.0])
     weights = xr.DataArray([-1.0, 1.0])
@@ -447,7 +476,7 @@ If the weights add up to to 0, ``sum`` r
 
 and ``mean``, ``std`` and ``var`` return ``nan``:
 
-.. ipython:: python
+.. jupyter-execute::
 
     data.weighted(weights).mean()
 
@@ -465,7 +494,7 @@ Coarsen large arrays
 :py:meth:`~xarray.DataArray.coarsen` and :py:meth:`~xarray.Dataset.coarsen`
 methods. This supports block aggregation along multiple dimensions,
 
-.. ipython:: python
+.. jupyter-execute::
 
     x = np.linspace(0, 10, 300)
     t = pd.date_range("1999-12-15", periods=364)
@@ -479,7 +508,7 @@ methods. This supports block aggregation
 In order to take a block mean for every 7 days along ``time`` dimension and
 every 2 points along ``x`` dimension,
 
-.. ipython:: python
+.. jupyter-execute::
 
     da.coarsen(time=7, x=2).mean()
 
@@ -488,14 +517,14 @@ length is not a multiple of the correspo
 You can choose ``boundary='trim'`` or ``boundary='pad'`` options for trimming
 the excess entries or padding ``nan`` to insufficient entries,
 
-.. ipython:: python
+.. jupyter-execute::
 
     da.coarsen(time=30, x=2, boundary="trim").mean()
 
 If you want to apply a specific function to a coordinate, you can pass the
 function or method name to the ``coord_func`` option,
 
-.. ipython:: python
+.. jupyter-execute::
 
     da.coarsen(time=7, x=2, coord_func={"time": "min"}).mean()
 
@@ -510,15 +539,14 @@ Xarray objects have some handy methods f
 coordinates. :py:meth:`~xarray.DataArray.differentiate` computes derivatives by
 central finite differences using their coordinates,
 
-.. ipython:: python
+.. jupyter-execute::
 
     a = xr.DataArray([0, 1, 2, 3], dims=["x"], coords=[[0.1, 0.11, 0.2, 0.3]])
-    a
     a.differentiate("x")
 
 This method can also be used for multidimensional arrays,
 
-.. ipython:: python
+.. jupyter-execute::
 
     a = xr.DataArray(
         np.arange(8).reshape(4, 2), dims=["x", "y"], coords={"x": [0.1, 0.11, 0.2, 0.3]}
@@ -528,7 +556,7 @@ This method can be used also for multidi
 :py:meth:`~xarray.DataArray.integrate` computes integration based on
 the trapezoidal rule using their coordinates,
 
-.. ipython:: python
+.. jupyter-execute::
 
     a.integrate("x")
 
@@ -546,7 +574,7 @@ Xarray objects provide an interface for
 using the least-squares method. :py:meth:`~xarray.DataArray.polyfit` computes the
 best fitting coefficients along a given dimension and for a given order,
 
-.. ipython:: python
+.. jupyter-execute::
 
     x = xr.DataArray(np.arange(10), dims=["x"], name="x")
     a = xr.DataArray(3 + 4 * x, dims=["x"], coords={"x": x})
@@ -556,7 +584,7 @@ best fitting coefficients along a given
 The method outputs a dataset containing the coefficients (and more if ``full=True``).
 The inverse operation is done with :py:meth:`~xarray.polyval`,
 
-.. ipython:: python
+.. jupyter-execute::
 
     xr.polyval(coord=x, coeffs=out.polyfit_coefficients)
 
@@ -576,7 +604,7 @@ user-defined functions and can fit along
 For example, we can fit a relationship between two ``DataArray`` objects, maintaining
 a unique fit at each spatial coordinate but aggregating over the time dimension:
 
-.. ipython:: python
+.. jupyter-execute::
 
     def exponential(x, a, xc):
         return np.exp((x - xc) / a)
@@ -606,7 +634,7 @@ We can also fit multi-dimensional functi
 simultaneously fit a summation of several functions, such as this field containing
 two Gaussian peaks:
 
-.. ipython:: python
+.. jupyter-execute::
 
     def gaussian_2d(coords, a, xc, yc, xalpha, yalpha):
         x, y = coords
@@ -660,42 +688,51 @@ operations to work, as commonly done in
 This is best illustrated by a few examples. Consider two one-dimensional
 arrays with different sizes aligned along different dimensions:
 
-.. ipython:: python
+.. jupyter-execute::
 
     a = xr.DataArray([1, 2], [("x", ["a", "b"])])
     a
+
+.. jupyter-execute::
+
     b = xr.DataArray([-1, -2, -3], [("y", [10, 20, 30])])
     b
 
 With xarray, we can apply binary mathematical operations to these arrays, and
 their dimensions are expanded automatically:
 
-.. ipython:: python
+.. jupyter-execute::
 
     a * b
 
 Moreover, dimensions are always reordered to the order in which they first
 appeared:
 
-.. ipython:: python
+.. jupyter-execute::
 
     c = xr.DataArray(np.arange(6).reshape(3, 2), [b["y"], a["x"]])
     c
+
+.. jupyter-execute::
+
     a + c
 
 This means, for example, that you can always subtract an array from its transpose:
 
-.. ipython:: python
+.. jupyter-execute::
 
     c - c.T
 
 You can explicitly broadcast xarray data structures by using the
 :py:func:`~xarray.broadcast` function:
 
-.. ipython:: python
+.. jupyter-execute::
 
     a2, b2 = xr.broadcast(a, b)
     a2
+
+.. jupyter-execute::
+
     b2
 
 .. _math automatic alignment:
@@ -711,7 +748,7 @@ Similarly to pandas, this alignment is a
 operations. The default result of a binary operation is given by the *intersection*
 (not the union) of coordinate labels:
 
-.. ipython:: python
+.. jupyter-execute::
 
     arr = xr.DataArray(np.arange(3), [("x", range(3))])
     arr + arr[:-1]
@@ -719,17 +756,15 @@ operations. The default result of a bina
 If coordinate values for a dimension are missing on either argument, all
 matching dimensions must have the same size:
 
-.. ipython::
-    :verbatim:
-
-    In [1]: arr + xr.DataArray([1, 2], dims="x")
-    ValueError: arguments without labels along dimension 'x' cannot be aligned because they have different dimension size(s) {2} than the size of the aligned dimension labels: 3
+.. jupyter-execute::
+    :raises:
 
+    arr + xr.DataArray([1, 2], dims="x")
 
 However, one can explicitly change this default automatic alignment type ("inner")
 via :py:func:`~xarray.set_options()` in a context manager:
 
-.. ipython:: python
+.. jupyter-execute::
 
     with xr.set_options(arithmetic_join="outer"):
         arr + arr[:1]
@@ -756,20 +791,29 @@ Although index coordinates are aligned,
 values conflict, they will be dropped. This is necessary, for example, because
 indexing turns 1D coordinates into scalar coordinates:
 
-.. ipython:: python
+.. jupyter-execute::
 
     arr[0]
+
+.. jupyter-execute::
+
     arr[1]
+
+.. jupyter-execute::
+
     # notice that the scalar coordinate 'x' is silently dropped
     arr[1] - arr[0]
 
 Still, xarray will persist other coordinates in arithmetic, as long as there
 are no conflicting values:
 
-.. ipython:: python
+.. jupyter-execute::
 
     # only one argument has the 'x' coordinate
     arr[0] + 1
+
+.. jupyter-execute::
+
     # both arguments have the same 'x' coordinate
     arr[0] - arr[0]
 
@@ -779,7 +823,7 @@ Math with datasets
 Datasets support arithmetic operations by automatically looping over all data
 variables:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds = xr.Dataset(
         {
@@ -792,30 +836,32 @@ variables:
 
 Datasets support most of the same methods found on data arrays:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.mean(dim="x")
+
+.. jupyter-execute::
+
     abs(ds)
 
 Datasets also support NumPy ufuncs (requires NumPy v1.13 or newer), or
 alternatively you can use :py:meth:`~xarray.Dataset.map` to map a function
 to each variable in a dataset:
 
-.. ipython:: python
+.. jupyter-execute::
 
-    np.sin(ds)
-    ds.map(np.sin)
+    np.sin(ds)  # equivalent to ds.map(np.sin)
 
 Datasets also use looping over variables for *broadcasting* in binary
 arithmetic. You can do arithmetic between any ``DataArray`` and a dataset:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds + arr
 
 Arithmetic between two datasets matches data variables of the same name:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds2 = xr.Dataset({"x_and_y": 0, "x_only": 100})
     ds - ds2
@@ -823,7 +869,7 @@ Arithmetic between two datasets matches
 Similarly to index based alignment, the result has the intersection of all
 matching data variables.
 
-.. _comput.wrapping-custom:
+.. _compute.wrapping-custom:
 
 Wrapping custom computation
 ===========================
@@ -858,7 +904,7 @@ functions/methods are written using ``ap
 Simple functions that act independently on each value should work without
 any additional arguments:
 
-.. ipython:: python
+.. jupyter-execute::
 
     squared_error = lambda x, y: (x - y) ** 2
     arr1 = xr.DataArray([0, 1, 2, 3], dims="x")
@@ -885,15 +931,15 @@ to set ``axis=-1``. As an example, here
             np.linalg.norm, x, input_core_dims=[[dim]], kwargs={"ord": ord, "axis": -1}
         )
 
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
 
     def vector_norm(x, dim, ord=None):
         return xr.apply_ufunc(
             np.linalg.norm, x, input_core_dims=[[dim]], kwargs={"ord": ord, "axis": -1}
         )
 
-.. ipython:: python
+.. jupyter-execute::
 
     vector_norm(arr1, dim="x")
 
diff -pruN 2025.03.1-8/doc/user-guide/dask.rst 2025.10.1-1/doc/user-guide/dask.rst
--- 2025.03.1-8/doc/user-guide/dask.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/user-guide/dask.rst	2025-10-10 10:38:05.000000000 +0000
@@ -5,14 +5,45 @@
 Parallel Computing with Dask
 ============================
 
+.. jupyter-execute::
+
+    # Note that it's not necessary to import dask to use xarray with dask.
+    import numpy as np
+    import pandas as pd
+    import xarray as xr
+    import bottleneck
+
+.. jupyter-execute::
+    :hide-code:
+
+    import os
+
+    np.random.seed(123456)
+
+    # limit the amount of information printed to screen
+    xr.set_options(display_expand_data=False)
+    np.set_printoptions(precision=3, linewidth=100, threshold=10, edgeitems=2)
+
+    ds = xr.Dataset(
+        {
+            "temperature": (
+                ("time", "latitude", "longitude"),
+                np.random.randn(30, 180, 180),
+            ),
+            "time": pd.date_range("2015-01-01", periods=30),
+            "longitude": np.arange(180),
+            "latitude": np.arange(89.5, -90.5, -1),
+        }
+    )
+    ds.to_netcdf("example-data.nc")
+
+
 Xarray integrates with `Dask <https://dask.org/?utm_source=xarray-docs>`__, a general purpose library for parallel computing, to handle larger-than-memory computations.
 
 If you’ve been using Xarray to read in large datasets or split up data across a number of files, you may already be using Dask:
 
 .. code-block:: python
 
-    import xarray as xr
-
     ds = xr.open_zarr("/path/to/data.zarr")
     timeseries = ds["temp"].mean(dim=["x", "y"]).compute()  # Compute result
 
@@ -115,31 +146,6 @@ When reading data, Dask divides your dat
 Loading Dask Arrays
 ~~~~~~~~~~~~~~~~~~~
 
-.. ipython:: python
-    :suppress:
-
-    import os
-
-    import numpy as np
-    import pandas as pd
-    import xarray as xr
-
-    np.random.seed(123456)
-    np.set_printoptions(precision=3, linewidth=100, threshold=100, edgeitems=3)
-
-    ds = xr.Dataset(
-        {
-            "temperature": (
-                ("time", "latitude", "longitude"),
-                np.random.randn(30, 180, 180),
-            ),
-            "time": pd.date_range("2015-01-01", periods=30),
-            "longitude": np.arange(180),
-            "latitude": np.arange(89.5, -90.5, -1),
-        }
-    )
-    ds.to_netcdf("example-data.nc")
-
 There are a few common cases where you may want to convert lazy Dask arrays into eager, in-memory Xarray data structures:
 
 - You want to inspect smaller intermediate results when working interactively or debugging
@@ -148,7 +154,7 @@ There are a few common cases where you m
 
 To do this, you can use :py:meth:`Dataset.compute` or :py:meth:`DataArray.compute`:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.compute()
 
@@ -158,11 +164,12 @@ To do this, you can use :py:meth:`Datase
 
 You can also access :py:attr:`DataArray.values`, which will always be a NumPy array:
 
-.. ipython::
-    :verbatim:
+.. jupyter-input::
+
+    ds.temperature.values
+
+.. jupyter-output::
 
-    In [5]: ds.temperature.values
-    Out[5]:
     array([[[  4.691e-01,  -2.829e-01, ...,  -5.577e-01,   3.814e-01],
             [  1.337e+00,  -1.531e+00, ...,   8.726e-01,  -1.538e+00],
             ...
@@ -171,9 +178,7 @@ You can also access :py:attr:`DataArray.
 NumPy ufuncs like :py:func:`numpy.sin` transparently work on all xarray objects, including those
 that store lazy Dask arrays:
 
-.. ipython:: python
-
-    import numpy as np
+.. jupyter-execute::
 
     np.sin(ds)
 
@@ -249,11 +254,6 @@ we use to calculate `Spearman's rank-cor
 
 .. code-block:: python
 
-    import numpy as np
-    import xarray as xr
-    import bottleneck
-
-
     def covariance_gufunc(x, y):
         return (
             (x - x.mean(axis=-1, keepdims=True)) * (y - y.mean(axis=-1, keepdims=True))
@@ -282,45 +282,46 @@ we use to calculate `Spearman's rank-cor
 
 The only aspect of this example that is different from standard usage of
 ``apply_ufunc()`` is that we needed to supply the ``output_dtypes`` arguments.
-(Read up on :ref:`comput.wrapping-custom` for an explanation of the
+(Read up on :ref:`compute.wrapping-custom` for an explanation of the
 "core dimensions" listed in ``input_core_dims``.)
 
 Our new ``spearman_correlation()`` function achieves near linear speedup
 when run on large arrays across the four cores on my laptop. It would also
 work as a streaming operation, when run on arrays loaded from disk:
 
-.. ipython::
-    :verbatim:
+.. jupyter-input::
 
-    In [56]: rs = np.random.default_rng(0)
+    rs = np.random.default_rng(0)
 
-    In [57]: array1 = xr.DataArray(rs.randn(1000, 100000), dims=["place", "time"])  # 800MB
+    array1 = xr.DataArray(rs.randn(1000, 100000), dims=["place", "time"])  # 800MB
 
-    In [58]: array2 = array1 + 0.5 * rs.randn(1000, 100000)
+    array2 = array1 + 0.5 * rs.randn(1000, 100000)
 
     # using one core, on NumPy arrays
-    In [61]: %time _ = spearman_correlation(array1, array2, 'time')
-    CPU times: user 21.6 s, sys: 2.84 s, total: 24.5 s
-    Wall time: 24.9 s
+    %time _ = spearman_correlation(array1, array2, 'time')
+    # CPU times: user 21.6 s, sys: 2.84 s, total: 24.5 s
+    # Wall time: 24.9 s
 
-    In [8]: chunked1 = array1.chunk({"place": 10})
+    chunked1 = array1.chunk({"place": 10})
 
-    In [9]: chunked2 = array2.chunk({"place": 10})
+    chunked2 = array2.chunk({"place": 10})
 
     # using all my laptop's cores, with Dask
-    In [63]: r = spearman_correlation(chunked1, chunked2, "time").compute()
+    r = spearman_correlation(chunked1, chunked2, "time").compute()
 
-    In [64]: %time _ = r.compute()
-    CPU times: user 30.9 s, sys: 1.74 s, total: 32.6 s
-    Wall time: 4.59 s
+    %time _ = r.compute()
+    # CPU times: user 30.9 s, sys: 1.74 s, total: 32.6 s
+    # Wall time: 4.59 s
 
 One limitation of ``apply_ufunc()`` is that it cannot be applied to arrays with
 multiple chunks along a core dimension:
 
-.. ipython::
-    :verbatim:
+.. jupyter-input::
+
+    spearman_correlation(chunked1, chunked2, "place")
+
+.. jupyter-output::
 
-    In [63]: spearman_correlation(chunked1, chunked2, "place")
     ValueError: dimension 'place' on 0th function argument to apply_ufunc with
     dask='parallelized' consists of multiple chunks, but is also a core
     dimension. To fix, rechunk into a single Dask array chunk along this
@@ -347,7 +348,7 @@ Functions that consume and return Xarray
 Your function will receive an Xarray Dataset or DataArray subset to one chunk
 along each chunked dimension.
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.temperature
 
@@ -356,7 +357,7 @@ At compute time, a function applied with
 (time x latitude x longitude) with values loaded. The following snippet illustrates how to check the shape of the object
 received by the applied function.
 
-.. ipython:: python
+.. jupyter-execute::
 
     def func(da):
         print(da.sizes)
@@ -375,7 +376,7 @@ work for your function, provide the ``te
 
 In this case, automatic inference has worked, so let's check that the result is as expected.
 
-.. ipython:: python
+.. jupyter-execute::
 
     mapped.load(scheduler="single-threaded")
     mapped.identical(ds.time)
@@ -387,8 +388,8 @@ the returned result is identical to ``ds
 
 Here is a common example where automated inference will not work.
 
-.. ipython:: python
-    :okexcept:
+.. jupyter-execute::
+    :raises:
 
     def func(da):
         print(da.sizes)
@@ -412,7 +413,7 @@ incur much memory cost.
     ``attrs`` set in ``func`` will be ignored.
 
 
-.. ipython:: python
+.. jupyter-execute::
 
     template = ds.temperature.isel(time=[1, 11, 21])
     mapped = xr.map_blocks(func, ds.temperature, template=template)
@@ -421,7 +422,7 @@ incur much memory cost.
 Notice that the 0-shaped sizes were not printed to screen. Since ``template`` has been provided,
 :py:func:`map_blocks` does not need to infer it by running ``func`` on 0-shaped inputs.
 
-.. ipython:: python
+.. jupyter-execute::
 
     mapped.identical(template)
 
@@ -429,7 +430,7 @@ Notice that the 0-shaped sizes were not
 :py:func:`map_blocks` also allows passing ``args`` and ``kwargs`` down to the user function ``func``.
 ``func`` will be executed as ``func(block_xarray, *args, **kwargs)`` so ``args`` must be a list and ``kwargs`` must be a dictionary.
 
-.. ipython:: python
+.. jupyter-execute::
 
     def func(obj, a, b=0):
         return obj + a + b
@@ -439,8 +440,8 @@ Notice that the 0-shaped sizes were not
     expected = ds + 10 + 10
     mapped.identical(expected)
 
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
 
     ds.close()  # Closes "example-data.nc".
     os.remove("example-data.nc")
@@ -478,8 +479,6 @@ Here's an example of a simplified workfl
 
 .. code-block:: python
 
-    import xarray
-
     ds = xr.open_zarr(  # Since we're doing a spatial reduction, increase chunk size in x, y
         "my-data.zarr", chunks={"x": 100, "y": 100}
     )
diff -pruN 2025.03.1-8/doc/user-guide/data-structures.rst 2025.10.1-1/doc/user-guide/data-structures.rst
--- 2025.03.1-8/doc/user-guide/data-structures.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/user-guide/data-structures.rst	2025-10-10 10:38:05.000000000 +0000
@@ -3,12 +3,14 @@
 Data Structures
 ===============
 
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
+    :hide-output:
 
     import numpy as np
     import pandas as pd
     import xarray as xr
+    import matplotlib.pyplot as plt
 
     np.random.seed(123456)
     np.set_printoptions(threshold=10)
@@ -16,7 +18,6 @@ Data Structures
     %xmode minimal
 
 
-
 DataArray
 ---------
 
@@ -62,7 +63,7 @@ The :py:class:`~xarray.DataArray` constr
 - ``attrs``: a dictionary of attributes to add to the instance
 - ``name``: a string that names the instance
 
-.. ipython:: python
+.. jupyter-execute::
 
     data = np.random.rand(4, 3)
     locs = ["IA", "IL", "IN"]
@@ -73,7 +74,7 @@ The :py:class:`~xarray.DataArray` constr
 Only ``data`` is required; all of the other arguments will be filled
 in with default values:
 
-.. ipython:: python
+.. jupyter-execute::
 
     xr.DataArray(data)
 
@@ -111,13 +112,13 @@ Coordinates can be specified in the foll
 
 As a list of tuples:
 
-.. ipython:: python
+.. jupyter-execute::
 
     xr.DataArray(data, coords=[("time", times), ("space", locs)])
 
 As a dictionary:
 
-.. ipython:: python
+.. jupyter-execute::
 
     xr.DataArray(
         data,
@@ -132,7 +133,7 @@ As a dictionary:
 
 As a dictionary with coords across multiple dimensions:
 
-.. ipython:: python
+.. jupyter-execute::
 
     xr.DataArray(
         data,
@@ -150,12 +151,15 @@ If you create a ``DataArray`` by supplyi
 ``pandas.Panel``, any non-specified arguments in the
 ``DataArray`` constructor will be filled in from the pandas object:
 
-.. ipython:: python
+.. jupyter-execute::
 
     df = pd.DataFrame({"x": [0, 1], "y": [2, 3]}, index=["a", "b"])
     df.index.name = "abc"
     df.columns.name = "xyz"
     df
+
+.. jupyter-execute::
+
     xr.DataArray(df)
 
 DataArray properties
@@ -163,17 +167,29 @@ DataArray properties
 
 Let's take a look at the important properties on our array:
 
-.. ipython:: python
+.. jupyter-execute::
 
     foo.values
+
+.. jupyter-execute::
+
     foo.dims
+
+.. jupyter-execute::
+
     foo.coords
+
+.. jupyter-execute::
+
     foo.attrs
+
+.. jupyter-execute::
+
     print(foo.name)
 
 You can modify ``values`` in place:
 
-.. ipython:: python
+.. jupyter-execute::
 
     foo.values = 1.0 * foo.values
 
@@ -186,7 +202,7 @@ You can modify ``values`` inplace:
 
 Now fill in some of that missing metadata:
 
-.. ipython:: python
+.. jupyter-execute::
 
     foo.name = "foo"
     foo.attrs["units"] = "meters"
@@ -195,7 +211,7 @@ Now fill in some of that missing metadat
 The :py:meth:`~xarray.DataArray.rename` method is another option, returning a
 new data array:
 
-.. ipython:: python
+.. jupyter-execute::
 
     foo.rename("bar")
 
@@ -206,9 +222,12 @@ The ``coords`` property is ``dict`` like
 accessed from the coordinates by name, or even by indexing the data array
 itself:
 
-.. ipython:: python
+.. jupyter-execute::
 
     foo.coords["time"]
+
+.. jupyter-execute::
+
     foo["time"]
 
 These are also :py:class:`~xarray.DataArray` objects, which contain tick-labels
@@ -216,10 +235,13 @@ for each dimension.
 
 Coordinates can also be set or removed by using dictionary-like syntax:
 
-.. ipython:: python
+.. jupyter-execute::
 
     foo["ranking"] = ("space", [1, 2, 3])
     foo.coords
+
+.. jupyter-execute::
+
     del foo["ranking"]
     foo.coords
 
@@ -295,7 +317,7 @@ pressure that were made under various co
 * they were made using instruments by three different manufacturers, which we
   will refer to as ``'manufac1'``, ``'manufac2'``, and ``'manufac3'``.
 
-.. ipython:: python
+.. jupyter-execute::
 
     np.random.seed(0)
     temperature = 15 + 8 * np.random.randn(2, 3, 4)
@@ -326,12 +348,12 @@ pressure that were made under various co
 Here we pass :py:class:`xarray.DataArray` objects or a pandas object as values
 in the dictionary:
 
-.. ipython:: python
+.. jupyter-execute::
 
     xr.Dataset(dict(bar=foo))
 
 
-.. ipython:: python
+.. jupyter-execute::
 
     xr.Dataset(dict(bar=foo.to_pandas()))
 
@@ -352,9 +374,9 @@ Dataset contents
 :py:class:`~xarray.Dataset` implements the Python mapping interface, with
 values given by :py:class:`xarray.DataArray` objects:
 
-.. ipython:: python
+.. jupyter-execute::
 
-    "temperature" in ds
+    print("temperature" in ds)
     ds["temperature"]
 
 Valid keys include each listed coordinate and data variable.
@@ -363,18 +385,20 @@ Data and coordinate variables are also c
 :py:attr:`~xarray.Dataset.data_vars` and :py:attr:`~xarray.Dataset.coords`
 dictionary-like attributes:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.data_vars
+
+.. jupyter-execute::
+
     ds.coords
 
 Finally, like data arrays, datasets also store arbitrary metadata in the form
 of ``attributes``:
 
-.. ipython:: python
-
-    ds.attrs
+.. jupyter-execute::
 
+    print(ds.attrs)
     ds.attrs["title"] = "example attribute"
     ds
 
@@ -385,7 +409,7 @@ or :py:class:`numpy.ndarray` objects.
 As a useful shortcut, you can use attribute style access for reading (but not
 setting) variables and attributes:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.temperature
 
@@ -400,7 +424,7 @@ Dictionary like methods
 We can update a dataset in-place using Python's standard dictionary syntax. For
 example, to create this example dataset from scratch, we could have written:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds = xr.Dataset()
     ds["temperature"] = (("loc", "instrument", "time"), temperature)
@@ -437,16 +461,22 @@ variables by indexing with a list of nam
 :py:meth:`~xarray.Dataset.drop_vars` methods to return a new ``Dataset``. These
 operations keep the coordinates around:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds[["temperature"]]
+
+.. jupyter-execute::
+
     ds[["temperature", "temperature_double"]]
+
+.. jupyter-execute::
+
     ds.drop_vars("temperature")
 
 To remove a dimension, you can use the :py:meth:`~xarray.Dataset.drop_dims` method.
 Any variables using that dimension are dropped:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.drop_dims("time")
 
@@ -454,7 +484,7 @@ As an alternate to dictionary-like modif
 :py:meth:`~xarray.Dataset.assign` and :py:meth:`~xarray.Dataset.assign_coords`.
 These methods return a new dataset with additional (or replaced) values:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.assign(temperature2=2 * ds.temperature)
 
@@ -464,7 +494,7 @@ simply calling it (e.g., ``func(ds)``).
 transforming your data (using "method chaining") instead of writing hard to
 follow nested function calls:
 
-.. ipython:: python
+.. jupyter-input::
 
     # these lines are equivalent, but with pipe we can make the logic flow
     # entirely from left to right
@@ -486,14 +516,14 @@ Renaming variables
 Another useful option is the :py:meth:`~xarray.Dataset.rename` method to rename
 dataset variables:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.rename({"temperature": "temp", "precipitation": "precip"})
 
 The related :py:meth:`~xarray.Dataset.swap_dims` method allows you to swap
 dimension and non-dimension variables:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.coords["day"] = ("time", [6, 7, 8, 9])
     ds.swap_dims({"time": "day"})
@@ -565,7 +595,7 @@ The :py:class:`~xarray.DataTree` constru
 
 Let's make a single datatree node with some example data in it:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds1 = xr.Dataset({"foo": "orange"})
     dt = xr.DataTree(name="root", dataset=ds1)
@@ -573,14 +603,14 @@ Let's make a single datatree node with s
 
 At this point we have created a single node datatree with no parent and no children.
 
-.. ipython:: python
+.. jupyter-execute::
 
-    dt.parent is None
+    print(dt.parent is None)
     dt.children
 
 We can add a second node to this tree, assigning it to the parent node ``dt``:
 
-.. ipython:: python
+.. jupyter-execute::
 
     dataset2 = xr.Dataset({"bar": 0}, coords={"y": ("y", [0, 1, 2])})
     dt2 = xr.DataTree(name="a", dataset=dataset2)
@@ -593,7 +623,7 @@ More idiomatically you can create a tree
 ``DataTrees``. In this case we add a new node under ``dt["child-node"]`` by
 providing the explicit path under ``"child-node"`` as the dictionary key:
 
-.. ipython:: python
+.. jupyter-execute::
 
     # create a third Dataset
     ds3 = xr.Dataset({"zed": np.nan})
@@ -602,18 +632,16 @@ providing the explicit path under ``"chi
 
 We have created a tree with three nodes in it:
 
-.. ipython:: python
+.. jupyter-execute::
 
     dt
 
-
-
 Consistency checks are enforced. For instance, if we try to create a cycle,
 where the root node is also a child of a descendant, the constructor will raise
 an :py:class:`~xarray.InvalidTreeError`:
 
-.. ipython:: python
-    :okexcept:
+.. jupyter-execute::
+    :raises:
 
     dt["child-node"].children = {"new-child": dt}
 
@@ -622,7 +650,7 @@ Alternatively you can also create a :py:
 - A dictionary mapping directory-like paths to either :py:class:`~xarray.DataTree` nodes or data, using :py:meth:`xarray.DataTree.from_dict()`,
 - A well formed netCDF or Zarr file on disk with :py:func:`~xarray.open_datatree()`. See :ref:`reading and writing files <io>`.
 
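+For example, a small tree can be built from a dictionary of node paths (a minimal
+sketch reusing the ``ds1`` and ``ds3`` datasets defined above):
+
+.. jupyter-execute::
+
+    xr.DataTree.from_dict({"/": ds1, "/child-node/new-zed-node": ds3})
+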
-For data files with groups that do not not align see
+For data files with groups that do not align, see
 :py:func:`xarray.open_groups` or target each group individually with
 :py:func:`xarray.open_dataset(group='groupname') <xarray.open_dataset>`. For
 more information about coordinate alignment see :ref:`datatree-inheritance`
@@ -636,16 +664,19 @@ Like :py:class:`~xarray.Dataset`, :py:cl
 but with values given by either :py:class:`~xarray.DataArray` objects or other
 :py:class:`~xarray.DataTree` objects.
 
-.. ipython:: python
+.. jupyter-execute::
 
     dt["child-node"]
+
+.. jupyter-execute::
+
     dt["foo"]
 
 Iterating over keys will iterate over both the names of variables and child nodes.
 
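+For example (a quick illustrative check):
+
+.. jupyter-execute::
+
+    list(dt)
+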
 We can also access all the data in a single node, and its inherited coordinates, through a dataset-like view
 
-.. ipython:: python
+.. jupyter-execute::
 
     dt["child-node"].dataset
 
@@ -655,16 +686,19 @@ returns an immutable view, but we can in
 as a new and mutable :py:class:`~xarray.Dataset` object via
 :py:meth:`DataTree.to_dataset() <xarray.DataTree.to_dataset>`:
 
-.. ipython:: python
+.. jupyter-execute::
 
     dt["child-node"].to_dataset()
 
 Like with :py:class:`~xarray.Dataset`, you can access the data and coordinate variables of a
 node separately via the :py:attr:`~xarray.DataTree.data_vars` and :py:attr:`~xarray.DataTree.coords` attributes:
 
-.. ipython:: python
+.. jupyter-execute::
 
     dt["child-node"].data_vars
+
+.. jupyter-execute::
+
     dt["child-node"].coords
 
 
@@ -675,7 +709,7 @@ We can update a datatree in-place using
 similar to how we can for Dataset objects. For example, to create this example
 DataTree from scratch, we could have written:
 
-.. ipython:: python
+.. jupyter-execute::
 
     dt = xr.DataTree(name="root")
     dt["foo"] = "orange"
@@ -720,7 +754,7 @@ size).
 
 Some examples:
 
-.. ipython:: python
+.. jupyter-execute::
 
     # Set up coordinates
     time = xr.DataArray(data=["2022-01", "2023-01"], dims="time")
@@ -780,7 +814,7 @@ that it applies to all descendent nodes.
 ``weather`` and in the ``temperature`` sub-tree.  Notice the inherited coordinates are
 explicitly shown in the tree representation under ``Inherited coordinates:``.
 
-.. ipython:: python
+.. jupyter-execute::
 
     dt2["/weather"]
 
@@ -788,17 +822,19 @@ Accessing any of the lower level trees t
 automatically includes coordinates from higher levels (e.g., ``time`` and
 ``station``):
 
-.. ipython:: python
+.. jupyter-execute::
 
     dt2["/weather/temperature"].dataset
 
 Similarly, when you retrieve a Dataset through :py:func:`~xarray.DataTree.to_dataset`, the inherited coordinates are
 included by default unless you exclude them with the ``inherit`` flag:
 
-.. ipython:: python
+.. jupyter-execute::
 
     dt2["/weather/temperature"].to_dataset()
 
+.. jupyter-execute::
+
     dt2["/weather/temperature"].to_dataset(inherit=False)
 
 For more examples and further discussion see :ref:`alignment and coordinate inheritance <hierarchical-data.alignment-and-coordinate-inheritance>`.
@@ -811,7 +847,7 @@ Coordinates
 Coordinates are ancillary variables stored for ``DataArray`` and ``Dataset``
 objects in the ``coords`` attribute:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.coords
 
@@ -856,10 +892,16 @@ To convert back and forth between data a
 :py:meth:`~xarray.Dataset.set_coords` and
 :py:meth:`~xarray.Dataset.reset_coords` methods:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.reset_coords()
+
+.. jupyter-execute::
+
     ds.set_coords(["temperature", "precipitation"])
+
+.. jupyter-execute::
+
     ds["temperature"].reset_coords(drop=True)
 
 Notice that these operations skip coordinates with names given by dimensions,
@@ -874,15 +916,15 @@ Coordinates methods
 ``Coordinates`` objects also have a few useful methods, mostly for converting
 them into dataset objects:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.coords.to_dataset()
 
 The merge method is particularly interesting, because it implements the same
 logic used for merging coordinates in arithmetic operations
-(see :ref:`comput`):
+(see :ref:`compute`):
 
-.. ipython:: python
+.. jupyter-execute::
 
     alt = xr.Dataset(coords={"z": [10], "lat": 0, "lon": 0})
     ds.coords.merge(alt.coords)
@@ -898,7 +940,7 @@ Indexes
 To convert a coordinate (or any ``DataArray``) into an actual
 :py:class:`pandas.Index`, use the :py:meth:`~xarray.DataArray.to_index` method:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds["time"].to_index()
 
@@ -906,7 +948,7 @@ A useful shortcut is the ``indexes`` pro
 ``Dataset``), which lazily constructs a dictionary whose keys are given by each
 dimension and whose values are ``Index`` objects:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.indexes
 
@@ -915,7 +957,7 @@ MultiIndex coordinates
 
 Xarray supports labeling coordinate values with a :py:class:`pandas.MultiIndex`:
 
-.. ipython:: python
+.. jupyter-execute::
 
     midx = pd.MultiIndex.from_arrays(
         [["R", "R", "V", "V"], [0.1, 0.2, 0.7, 0.9]], names=("band", "wn")
@@ -926,9 +968,12 @@ Xarray supports labeling coordinate valu
 For convenience multi-index levels are directly accessible as "virtual" or
 "derived" coordinates (marked by ``-`` when printing a dataset or data array):
 
-.. ipython:: python
+.. jupyter-execute::
 
     mda["band"]
+
+.. jupyter-execute::
+
     mda.wn
 
 Indexing with multi-index levels is also possible using the ``sel`` method
diff -pruN 2025.03.1-8/doc/user-guide/duckarrays.rst 2025.10.1-1/doc/user-guide/duckarrays.rst
--- 2025.03.1-8/doc/user-guide/duckarrays.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/user-guide/duckarrays.rst	2025-10-10 10:38:05.000000000 +0000
@@ -49,9 +49,14 @@ numpy-like functionality such as indexin
 For example, the `sparse <https://sparse.pydata.org/en/stable/>`_ library provides a sparse array type which is useful for representing nD array objects like sparse matrices
 in a memory-efficient manner. We can create a sparse array object (of the :py:class:`sparse.COO` type) from a numpy array like this:
 
-.. ipython:: python
+.. jupyter-execute::
 
     from sparse import COO
+    import xarray as xr
+    import numpy as np
+    %xmode minimal
+
+.. jupyter-execute::
 
     x = np.eye(4, dtype=np.uint8)  # create diagonal identity matrix
     s = COO.from_numpy(x)
@@ -63,14 +68,17 @@ Sparse array objects can be converted ba
 
 Just like :py:class:`numpy.ndarray` objects, :py:class:`sparse.COO` arrays support indexing
 
-.. ipython:: python
+.. jupyter-execute::
 
     s[1, 1]  # diagonal elements should be ones
+
+.. jupyter-execute::
+
     s[2, 3]  # off-diagonal elements should be zero
 
 broadcasting,
 
-.. ipython:: python
+.. jupyter-execute::
 
     x2 = np.zeros(
         (4, 1), dtype=np.uint8
@@ -80,14 +88,14 @@ broadcasting,
 
 and various computation methods
 
-.. ipython:: python
+.. jupyter-execute::
 
     s.sum(axis=1)
 
 This numpy-like array also supports calling so-called `numpy ufuncs <https://numpy.org/doc/stable/reference/ufuncs.html#available-ufuncs>`_
 ("universal functions") on it directly:
 
-.. ipython:: python
+.. jupyter-execute::
 
     np.sum(s, axis=1)
 
@@ -113,7 +121,7 @@ both accept data in various forms throug
 
 For example, we can wrap the sparse array we created earlier inside a new DataArray object:
 
-.. ipython:: python
+.. jupyter-execute::
 
     s_da = xr.DataArray(s, dims=["i", "j"])
     s_da
@@ -123,7 +131,7 @@ representation of the underlying wrapped
 
 Of course our sparse array object is still there underneath - it's stored under the ``.data`` attribute of the dataarray:
 
-.. ipython:: python
+.. jupyter-execute::
 
     s_da.data
 
@@ -132,7 +140,7 @@ Array methods
 
 We saw above that numpy-like arrays provide numpy methods. Xarray automatically uses these when you call the corresponding xarray method:
 
-.. ipython:: python
+.. jupyter-execute::
 
     s_da.sum(dim="j")
 
@@ -141,7 +149,7 @@ Converting wrapped types
 
 If you want to change the type inside your xarray object you can use :py:meth:`DataArray.as_numpy`:
 
-.. ipython:: python
+.. jupyter-execute::
 
     s_da.as_numpy()
 
@@ -152,12 +160,12 @@ If instead you want to convert to numpy
 always uses :py:func:`numpy.asarray` which will fail for some array types (e.g. ``cupy``), whereas :py:meth:`~DataArray.to_numpy`
 uses the correct method depending on the array type.
 
-.. ipython:: python
+.. jupyter-execute::
 
     s_da.to_numpy()
 
-.. ipython:: python
-    :okexcept:
+.. jupyter-execute::
+    :raises:
 
     s_da.values
 
diff -pruN 2025.03.1-8/doc/user-guide/ecosystem.rst 2025.10.1-1/doc/user-guide/ecosystem.rst
--- 2025.03.1-8/doc/user-guide/ecosystem.rst	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/doc/user-guide/ecosystem.rst	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,111 @@
+.. _ecosystem:
+
+Xarray related projects
+-----------------------
+
+Below is a list of existing open source projects that build
+functionality upon xarray. See also section :ref:`internals` for more
+details on how to build xarray extensions. We also maintain the
+`xarray-contrib <https://github.com/xarray-contrib>`_ GitHub organization
+as a place to curate projects that build upon xarray.
+
+Geosciences
+~~~~~~~~~~~
+
+- `aospy <https://aospy.readthedocs.io>`_: Automated analysis and management of gridded climate data.
+- `argopy <https://github.com/euroargodev/argopy>`_: xarray-based Argo data access, manipulation and visualisation for standard users as well as Argo experts.
+- `cf_xarray <https://cf-xarray.readthedocs.io/en/latest/>`_: Provides an accessor (DataArray.cf or Dataset.cf) that allows you to interpret Climate and Forecast metadata convention attributes present on xarray objects.
+- `climpred <https://climpred.readthedocs.io>`_: Analysis of ensemble forecast models for climate prediction.
+- `geocube <https://corteva.github.io/geocube>`_: Tool to convert geopandas vector data into rasterized xarray data.
+- `GeoWombat <https://github.com/jgrss/geowombat>`_: Utilities for analysis of remotely sensed and gridded raster data at scale (easily tame Landsat, Sentinel, Quickbird, and PlanetScope).
+- `grib2io <https://github.com/NOAA-MDL/grib2io>`_: Utility to work with GRIB2 files including an xarray backend, DASK support for parallel reading in open_mfdataset, lazy loading of data, editing of GRIB2 attributes and GRIB2IO DataArray attrs, and spatial interpolation and reprojection of GRIB2 messages and GRIB2IO Datasets/DataArrays for both grid to grid and grid to stations.
+- `gsw-xarray <https://github.com/DocOtak/gsw-xarray>`_: a wrapper around `gsw <https://teos-10.github.io/GSW-Python>`_ that adds CF compliant attributes when possible, units, name.
+- `infinite-diff <https://github.com/spencerahill/infinite-diff>`_: xarray-based finite-differencing, focused on gridded climate/meteorology data
+- `marc_analysis <https://github.com/darothen/marc_analysis>`_: Analysis package for CESM/MARC experiments and output.
+- `MetPy <https://unidata.github.io/MetPy/dev/index.html>`_: A collection of tools in Python for reading, visualizing, and performing calculations with weather data.
+- `MPAS-Analysis <https://mpas-dev.github.io/MPAS-Analysis>`_: Analysis for simulations produced with Model for Prediction Across Scales (MPAS) components and the Accelerated Climate Model for Energy (ACME).
+- `OGGM <https://oggm.org/>`_: Open Global Glacier Model
+- `Oocgcm <https://oocgcm.readthedocs.io/>`_: Analysis of large gridded geophysical datasets
+- `Open Data Cube <https://www.opendatacube.org/>`_: Analysis toolkit of continental scale Earth Observation data from satellites.
+- `Pangaea <https://pangaea.readthedocs.io/en/latest/>`_: xarray extension for gridded land surface & weather model output.
+- `Pangeo <https://pangeo.io>`_: A community effort for big data geoscience in the cloud.
+- `PyGDX <https://pygdx.readthedocs.io/en/latest/>`_: Python 3 package for
+  accessing data stored in GAMS Data eXchange (GDX) files. Also uses a custom
+  subclass.
+- `pyinterp <https://pangeo-pyinterp.readthedocs.io/en/latest/>`_: Python 3 package for interpolating geo-referenced data used in the field of geosciences.
+- `pyXpcm <https://pyxpcm.readthedocs.io>`_: xarray-based Profile Classification Modelling (PCM), mostly for ocean data.
+- `Regionmask <https://regionmask.readthedocs.io/>`_: plotting and creation of masks of spatial regions
+- `rioxarray <https://corteva.github.io/rioxarray>`_: geospatial xarray extension powered by rasterio
+- `salem <https://salem.readthedocs.io>`_: Adds geolocalised subsetting, masking, and plotting operations to xarray's data structures via accessors.
+- `SatPy <https://satpy.readthedocs.io/>`_ : Library for reading and manipulating meteorological remote sensing data and writing it to various image and data file formats.
+- `SARXarray <https://tudelftgeodesy.github.io/sarxarray/>`_: xarray extension for reading and processing large Synthetic Aperture Radar (SAR) data stacks.
+- `shxarray <https://shxarray.wobbly.earth/>`_: Convert, filter, and map geodesy-related spherical harmonic representations of gravity and terrestrial water storage through an xarray extension.
+- `Spyfit <https://spyfit.readthedocs.io/en/master/>`_: FTIR spectroscopy of the atmosphere
+- `windspharm <https://ajdawson.github.io/windspharm/index.html>`_: Spherical
+  harmonic wind analysis in Python.
+- `wradlib <https://wradlib.org/>`_: An Open Source Library for Weather Radar Data Processing.
+- `wrf-python <https://wrf-python.readthedocs.io/>`_: A collection of diagnostic and interpolation routines for use with output of the Weather Research and Forecasting (WRF-ARW) Model.
+- `xarray-eopf <https://github.com/EOPF-Sample-Service/xarray-eopf>`_: An xarray backend implementation for opening ESA EOPF data products in Zarr format.
+- `xarray-regrid <https://github.com/EXCITED-CO2/xarray-regrid>`_: xarray extension for regridding rectilinear data.
+- `xarray-simlab <https://xarray-simlab.readthedocs.io>`_: xarray extension for computer model simulations.
+- `xarray-spatial <https://xarray-spatial.org/>`_: Numba-accelerated raster-based spatial processing tools (NDVI, curvature, zonal-statistics, proximity, hillshading, viewshed, etc.)
+- `xarray-topo <https://xarray-topo.readthedocs.io/>`_: xarray extension for topographic analysis and modelling.
+- `xbpch <https://github.com/darothen/xbpch>`_: xarray interface for bpch files.
+- `xCDAT <https://xcdat.readthedocs.io/>`_: An extension of xarray for climate data analysis on structured grids.
+- `xclim <https://xclim.readthedocs.io/>`_: A library for calculating climate science indices with unit handling built from xarray and dask.
+- `xESMF <https://pangeo-xesmf.readthedocs.io/>`_: Universal regridder for geospatial data.
+- `xgcm <https://xgcm.readthedocs.io/>`_: Extends the xarray data model to understand finite volume grid cells (common in General Circulation Models) and provides interpolation and difference operations for such grids.
+- `xmitgcm <https://xmitgcm.readthedocs.io/>`_: a python package for reading `MITgcm <https://mitgcm.org/>`_ binary MDS files into xarray data structures.
+- `xnemogcm <https://github.com/rcaneill/xnemogcm/>`_: a package to read `NEMO <https://nemo-ocean.eu/>`_ output files and add attributes to interface with xgcm.
+
+Machine Learning
+~~~~~~~~~~~~~~~~
+- `ArviZ <https://arviz-devs.github.io/arviz/>`_: Exploratory analysis of Bayesian models, built on top of xarray.
+- `Darts <https://github.com/unit8co/darts/>`_: User-friendly modern machine learning for time series in Python.
+- `Elm <https://ensemble-learning-models.readthedocs.io>`_: Parallel machine learning on xarray data structures
+- `sklearn-xarray (1) <https://phausamann.github.io/sklearn-xarray>`_: Combines scikit-learn and xarray (1).
+- `sklearn-xarray (2) <https://sklearn-xarray.readthedocs.io/en/latest/>`_: Combines scikit-learn and xarray (2).
+- `xbatcher <https://xbatcher.readthedocs.io>`_: Batch Generation from Xarray Datasets.
+
+Other domains
+~~~~~~~~~~~~~
+- `ptsa <https://pennmem.github.io/ptsa/html/index.html>`_: EEG Time Series Analysis
+- `pycalphad <https://pycalphad.org/docs/latest/>`_: Computational Thermodynamics in Python
+- `pyomeca <https://pyomeca.github.io/>`_: Python framework for biomechanical analysis
+- `movement <https://movement.neuroinformatics.dev/>`_: A Python toolbox for analysing animal body movements
+
+Extend xarray capabilities
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+- `Collocate <https://github.com/cistools/collocate>`_: Collocate xarray trajectories in arbitrary physical dimensions
+- `eofs <https://ajdawson.github.io/eofs/>`_: EOF analysis in Python.
+- `hypothesis-gufunc <https://hypothesis-gufunc.readthedocs.io/en/latest/>`_: Extension to hypothesis. Makes it easy to write unit tests with xarray objects as input.
+- `ntv-pandas <https://github.com/loco-philippe/ntv-pandas>`_: A tabular analyzer and a semantic, compact and reversible converter for multidimensional and tabular data.
+- `nxarray <https://github.com/nxarray/nxarray>`_: NeXus input/output capability for xarray.
+- `xarray-compare <https://github.com/astropenguin/xarray-compare>`_: xarray extension for data comparison.
+- `xarray-dataclasses <https://github.com/astropenguin/xarray-dataclasses>`_: xarray extension for typed DataArray and Dataset creation.
+- `xarray_einstats <https://xarray-einstats.readthedocs.io>`_: Statistics, linear algebra and einops for xarray
+- `xarray_extras <https://github.com/crusaderky/xarray_extras>`_: Advanced algorithms for xarray objects (e.g. integrations/interpolations).
+- `xeofs <https://github.com/nicrie/xeofs>`_: PCA/EOF analysis and related techniques, integrated with xarray and Dask for efficient handling of large-scale data.
+- `xpublish <https://xpublish.readthedocs.io/>`_: Publish Xarray Datasets via a Zarr compatible REST API.
+- `xrft <https://github.com/rabernat/xrft>`_: Fourier transforms for xarray data.
+- `xr-scipy <https://xr-scipy.readthedocs.io>`_: A lightweight scipy wrapper for xarray.
+- `X-regression <https://github.com/kuchaale/X-regression>`_: Multiple linear regression from Statsmodels library coupled with Xarray library.
+- `xskillscore <https://github.com/xarray-contrib/xskillscore>`_: Metrics for verifying forecasts.
+- `xyzpy <https://xyzpy.readthedocs.io>`_: Easily generate high dimensional data, including parallelization.
+- `xarray-lmfit <https://github.com/kmnhan/xarray-lmfit>`_: xarray extension for curve fitting using `lmfit <https://lmfit.github.io/lmfit-py/>`_.
+
+Visualization
+~~~~~~~~~~~~~
+- `datashader <https://datashader.org>`_, `geoviews <https://geoviews.org>`_, `holoviews <https://holoviews.org/>`_: visualization packages for large data.
+- `hvplot <https://hvplot.pyviz.org/>`_: A high-level plotting API for the PyData ecosystem built on HoloViews.
+- `psyplot <https://psyplot.readthedocs.io>`_: Interactive data visualization with python.
+- `xarray-leaflet <https://github.com/davidbrochart/xarray_leaflet>`_: An xarray extension for tiled map plotting based on ipyleaflet.
+- `xtrude <https://github.com/davidbrochart/xtrude>`_: An xarray extension for 3D terrain visualization based on pydeck.
+- `pyvista-xarray <https://github.com/pyvista/pyvista-xarray>`_: xarray DataArray accessor for 3D visualization with `PyVista <https://github.com/pyvista/pyvista>`_ and DataSet engines for reading VTK data formats.
+
+Non-Python projects
+~~~~~~~~~~~~~~~~~~~
+- `xframe <https://github.com/xtensor-stack/xframe>`_: C++ data structures inspired by xarray.
+- `AxisArrays <https://github.com/JuliaArrays/AxisArrays.jl>`_, `NamedArrays <https://github.com/davidavdav/NamedArrays.jl>`_ and `YAXArrays.jl <https://github.com/JuliaDataCubes/YAXArrays.jl>`_: similar data structures for Julia.
+
+More projects can be found at the `"xarray" Github topic <https://github.com/topics/xarray>`_.
diff -pruN 2025.03.1-8/doc/user-guide/groupby.rst 2025.10.1-1/doc/user-guide/groupby.rst
--- 2025.03.1-8/doc/user-guide/groupby.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/user-guide/groupby.rst	2025-10-10 10:38:05.000000000 +0000
@@ -37,8 +37,8 @@ Split
 
 Let's create a simple example dataset:
 
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
 
     import numpy as np
     import pandas as pd
@@ -46,7 +46,7 @@ Let's create a simple example dataset:
 
     np.random.seed(123456)
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds = xr.Dataset(
         {"foo": (("x", "y"), np.random.rand(4, 3))},
@@ -58,26 +58,26 @@ Let's create a simple example dataset:
 If we groupby the name of a variable or coordinate in a dataset (we can also
 use a DataArray directly), we get back a ``GroupBy`` object:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.groupby("letters")
 
 This object works very similarly to a pandas GroupBy object. You can view
 the group indices with the ``groups`` attribute:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.groupby("letters").groups
 
 You can also iterate over groups in ``(label, group)`` pairs:
 
-.. ipython:: python
+.. jupyter-execute::
 
     list(ds.groupby("letters"))
 
 You can index out a particular group:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.groupby("letters")["b"]
 
@@ -91,7 +91,7 @@ but instead want to "bin" the data into
 a customized coordinate, but xarray facilitates this via the
 :py:meth:`Dataset.groupby_bins` method.
 
-.. ipython:: python
+.. jupyter-execute::
 
     x_bins = [0, 25, 50]
     ds.groupby_bins("x", x_bins).groups
@@ -102,7 +102,7 @@ labeled with strings using set notation
 override this behavior, you can specify the bin labels explicitly. Here we
 choose ``float`` labels which identify the bin centers:
 
-.. ipython:: python
+.. jupyter-execute::
 
     x_bin_labels = [12.5, 37.5]
     ds.groupby_bins("x", x_bins, labels=x_bin_labels).groups
@@ -115,7 +115,7 @@ To apply a function to each group, you c
 :py:meth:`core.groupby.DatasetGroupBy.map` method. The resulting objects are automatically
 concatenated back together along the group axis:
 
-.. ipython:: python
+.. jupyter-execute::
 
     def standardize(x):
         return (x - x.mean()) / x.std()
@@ -127,14 +127,14 @@ GroupBy objects also have a :py:meth:`co
 methods like :py:meth:`core.groupby.DatasetGroupBy.mean` as shortcuts for applying an
 aggregation function:
 
-.. ipython:: python
+.. jupyter-execute::
 
     arr.groupby("letters").mean(dim="x")
 
 Using a groupby is thus also a convenient shortcut for aggregating over all
 dimensions *other than* the provided one:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.groupby("x").std(...)
 
@@ -151,7 +151,7 @@ There are two special aggregation operat
 groupby objects: first and last. These provide the first or last example of
 values for each group along the grouped dimension:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.groupby("letters").first(...)
 
@@ -166,10 +166,13 @@ for ``(GroupBy, Dataset)`` and ``(GroupB
 dataset or data array uses the unique grouped values as one of its index
 coordinates. For example:
 
-.. ipython:: python
+.. jupyter-execute::
 
     alt = arr.groupby("letters").mean(...)
     alt
+
+.. jupyter-execute::
+
     ds.groupby("letters") - alt
 
 This last line is roughly equivalent to the following::
@@ -191,7 +194,7 @@ operations over multidimensional coordin
 
 __ https://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#_two_dimensional_latitude_longitude_coordinate_variables
 
-.. ipython:: python
+.. jupyter-execute::
 
     da = xr.DataArray(
         [[0, 1], [2, 3]],
@@ -202,14 +205,20 @@ __ https://cfconventions.org/cf-conventi
         dims=["ny", "nx"],
     )
     da
+
+.. jupyter-execute::
+
     da.groupby("lon").sum(...)
+
+.. jupyter-execute::
+
     da.groupby("lon").map(lambda x: x - x.mean(), shortcut=False)
 
 Because multidimensional groups have the ability to generate a very large
 number of bins, coarse-binning via :py:meth:`Dataset.groupby_bins`
 may be desirable:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da.groupby_bins("lon", [0, 45, 50]).sum()
 
@@ -217,7 +226,7 @@ These methods group by ``lon`` values. I
 cell in a grid, regardless of value, by stacking multiple dimensions,
 applying your function, and then unstacking the result:
 
-.. ipython:: python
+.. jupyter-execute::
 
     stacked = da.stack(gridcell=["ny", "nx"])
     stacked.groupby("gridcell").sum(...).unstack("gridcell")
@@ -310,7 +319,7 @@ Grouping by multiple variables
 
 Use grouper objects to group by multiple dimensions:
 
-.. ipython:: python
+.. jupyter-execute::
 
     from xarray.groupers import UniqueGrouper
 
@@ -318,20 +327,28 @@ Use grouper objects to group by multiple
 
 The above is sugar for using ``UniqueGrouper`` objects directly:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da.groupby(lat=UniqueGrouper(), lon=UniqueGrouper()).sum()
 
 
 Different groupers can be combined to construct sophisticated GroupBy operations.
 
-.. ipython:: python
+.. jupyter-execute::
 
     from xarray.groupers import BinGrouper
 
     ds.groupby(x=BinGrouper(bins=[5, 15, 25]), letters=UniqueGrouper()).sum()
 
 
+Time Grouping and Resampling
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. seealso::
+
+   See :ref:`resampling`.
+
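+As a quick illustration (a minimal sketch; see :ref:`resampling` for the full
+treatment), resampling along a datetime coordinate behaves much like any other
+groupby reduction:
+
+.. jupyter-execute::
+
+    # Build a tiny monthly time series and resample it to two-month means.
+    ts = xr.DataArray(
+        np.arange(6),
+        dims="time",
+        coords={"time": pd.date_range("2021-01-01", periods=6, freq="MS")},
+    )
+    ts.resample(time="2MS").mean()
+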
+
 Shuffling
 ~~~~~~~~~
 
@@ -339,7 +356,7 @@ Shuffling is a generalization of sorting
 Shuffling reorders the DataArray or the DataArrays in a Dataset such that all members of a group occur sequentially. For example,
 you can shuffle the object using either :py:class:`DatasetGroupBy` or :py:class:`DataArrayGroupBy` as appropriate.
 
-.. ipython:: python
+.. jupyter-execute::
 
     da = xr.DataArray(
         dims="x",
diff -pruN 2025.03.1-8/doc/user-guide/hierarchical-data.rst 2025.10.1-1/doc/user-guide/hierarchical-data.rst
--- 2025.03.1-8/doc/user-guide/hierarchical-data.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/user-guide/hierarchical-data.rst	2025-10-10 10:38:05.000000000 +0000
@@ -3,8 +3,9 @@
 Hierarchical data
 =================
 
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
+    :hide-output:
 
     import numpy as np
     import pandas as pd
@@ -54,7 +55,7 @@ Here we go into more detail about how to
 
 Let's start by defining nodes representing the two siblings, Bart and Lisa Simpson:
 
-.. ipython:: python
+.. jupyter-execute::
 
     bart = xr.DataTree(name="Bart")
     lisa = xr.DataTree(name="Lisa")
@@ -62,35 +63,43 @@ Let's start by defining nodes representi
 Each of these node objects knows their own :py:class:`~xarray.DataTree.name`, but they currently have no relationship to one another.
 We can connect them by creating another node representing a common parent, Homer Simpson:
 
-.. ipython:: python
+.. jupyter-execute::
 
     homer = xr.DataTree(name="Homer", children={"Bart": bart, "Lisa": lisa})
 
 Here we set the children of Homer in the node's constructor.
-We now have a small family tree
+We now have a small family tree where we can see how these individual Simpson family members are related to one another:
 
-.. ipython:: python
+.. jupyter-execute::
 
-    homer
+    print(homer)
+
+.. note::
+   We use ``print()`` above to show the compact tree hierarchy.
+   :py:class:`~xarray.DataTree` objects also have an interactive HTML representation that is enabled by default in editors such as JupyterLab and VSCode.
+   The HTML representation is especially helpful for larger trees and exploring new datasets, as it allows you to expand and collapse nodes.
+   If you prefer the text representation, you can also set ``xr.set_options(display_style="text")``.
+
+..
+   Comment:: may remove note and print()s after upstream theme changes https://github.com/pydata/pydata-sphinx-theme/pull/2187
 
-where we can see how these individual Simpson family members are related to one another.
 The nodes representing Bart and Lisa are now connected - we can confirm their sibling rivalry by examining the :py:class:`~xarray.DataTree.siblings` property:
 
-.. ipython:: python
+.. jupyter-execute::
 
     list(homer["Bart"].siblings)
 
 But oops, we forgot Homer's third daughter, Maggie! Let's add her by updating Homer's :py:class:`~xarray.DataTree.children` property to include her:
 
-.. ipython:: python
+.. jupyter-execute::
 
     maggie = xr.DataTree(name="Maggie")
     homer.children = {"Bart": bart, "Lisa": lisa, "Maggie": maggie}
-    homer
+    print(homer)
 
 Let's check that Maggie knows who her Dad is:
 
-.. ipython:: python
+.. jupyter-execute::
 
     maggie.parent.name
 
@@ -103,36 +112,40 @@ That's good - updating the properties of
 
 Homer is currently listed as having no parent (the so-called "root node" of this tree), but we can update his :py:class:`~xarray.DataTree.parent` property:
 
-.. ipython:: python
+.. jupyter-execute::
 
     abe = xr.DataTree(name="Abe")
     abe.children = {"Homer": homer}
 
 Abe is now the "root" of this tree, which we can see by examining the :py:class:`~xarray.DataTree.root` property of any node in the tree
 
-.. ipython:: python
+.. jupyter-execute::
 
     maggie.root.name
 
 We can see the whole tree by printing Abe's node or just part of the tree by printing Homer's node:
 
-.. ipython:: python
+.. jupyter-execute::
 
-    abe
-    abe["Homer"]
+    print(abe)
 
+.. jupyter-execute::
+
+    print(abe["Homer"])
 
 In episode 28, Abe Simpson reveals that he had another son, Herbert "Herb" Simpson.
 We can add Herbert to the family tree without displacing Homer by :py:meth:`~xarray.DataTree.assign`-ing another child to Abe:
 
-.. ipython:: python
+.. jupyter-execute::
 
     herbert = xr.DataTree(name="Herb")
     abe = abe.assign({"Herbert": herbert})
-    abe
+    print(abe)
+
+.. jupyter-execute::
 
-    abe["Herbert"].name
-    herbert.name
+    print(abe["Herbert"].name)
+    print(herbert.name)
 
 .. note::
    This example shows a subtlety - the returned tree has Homer's brother listed as ``"Herbert"``,
@@ -145,8 +158,8 @@ Certain manipulations of our tree are fo
 In episode 51 of the show Futurama, Philip J. Fry travels back in time and accidentally becomes his own Grandfather.
 If we try similar time-travelling hijinks with Homer, we get a :py:class:`~xarray.InvalidTreeError` raised:
 
-.. ipython:: python
-    :okexcept:
+.. jupyter-execute::
+    :raises:
 
     abe["Homer"].children = {"Abe": abe}
 
@@ -157,7 +170,7 @@ Ancestry in an Evolutionary Tree
 
 Let's use a different example of a tree to discuss more complex relationships between nodes - the phylogenetic tree, or tree of life.
 
-.. ipython:: python
+.. jupyter-execute::
 
     vertebrates = xr.DataTree.from_dict(
         {
@@ -173,6 +186,7 @@ Let's use a different example of a tree
     )
 
     primates = vertebrates["/Bony Skeleton/Four Limbs/Amniotic Egg/Hair/Primates"]
+
     dinosaurs = vertebrates[
         "/Bony Skeleton/Four Limbs/Amniotic Egg/Two Fenestrae/Dinosaurs"
     ]
@@ -180,9 +194,9 @@ Let's use a different example of a tree
 We have used the :py:meth:`~xarray.DataTree.from_dict` constructor method as a preferred way to quickly create a whole tree,
 and :ref:`filesystem paths` (to be explained shortly) to select two nodes of interest.
 
-.. ipython:: python
+.. jupyter-execute::
 
-    vertebrates
+    print(vertebrates)
 
 This tree shows various families of species, grouped by their common features (making it technically a `"Cladogram" <https://en.wikipedia.org/wiki/Cladogram>`_,
 rather than an evolutionary tree).
@@ -191,27 +205,27 @@ Here both the species and the features u
 We can however get a list of only the nodes we used to represent species by using the fact that all those nodes have no children - they are "leaf nodes".
 We can check if a node is a leaf with :py:meth:`~xarray.DataTree.is_leaf`, and get a list of all leaves with the :py:class:`~xarray.DataTree.leaves` property:
 
-.. ipython:: python
+.. jupyter-execute::
 
-    primates.is_leaf
+    print(primates.is_leaf)
     [node.name for node in vertebrates.leaves]
 
 Pretending that this is a true evolutionary tree for a moment, we can find the features of the evolutionary ancestors (so-called "ancestor" nodes),
 the distinguishing feature of the common ancestor of all vertebrate life (the root node),
 and even the distinguishing feature of the common ancestor of any two species (the common ancestor of two nodes):
 
-.. ipython:: python
+.. jupyter-execute::
 
-    [node.name for node in reversed(primates.parents)]
-    primates.root.name
-    primates.find_common_ancestor(dinosaurs).name
+    print([node.name for node in reversed(primates.parents)])
+    print(primates.root.name)
+    print(primates.find_common_ancestor(dinosaurs).name)
 
 We can only find a common ancestor between two nodes that lie in the same tree.
 If we try to find the common evolutionary ancestor between primates and an Alien species that has no relationship to Earth's evolutionary tree,
 an error will be raised.
 
-.. ipython:: python
-    :okexcept:
+.. jupyter-execute::
+    :raises:
 
     alien = xr.DataTree(name="Xenomorph")
     primates.find_common_ancestor(alien)
@@ -229,7 +243,7 @@ Properties
 
 We can navigate trees using the :py:class:`~xarray.DataTree.parent` and :py:class:`~xarray.DataTree.children` properties of each node, for example:
 
-.. ipython:: python
+.. jupyter-execute::
 
     lisa.parent.children["Bart"].name
 
@@ -244,15 +258,15 @@ In general :py:class:`~xarray.DataTree.D
 including :py:meth:`~xarray.DataTree.keys`, :py:class:`~xarray.DataTree.values`, :py:class:`~xarray.DataTree.items`,
 :py:meth:`~xarray.DataTree.__delitem__` and :py:meth:`~xarray.DataTree.update`.
 
-.. ipython:: python
+.. jupyter-execute::
 
-    vertebrates["Bony Skeleton"]["Ray-finned Fish"]
+    print(vertebrates["Bony Skeleton"]["Ray-finned Fish"])
 
 Note that the dict-like interface combines access to child :py:class:`~xarray.DataTree` nodes and stored :py:class:`~xarray.DataArray` objects,
 so if we have a node that contains both children and data, calling :py:meth:`~xarray.DataTree.keys` will list both names of child nodes and
 names of data variables:
 
-.. ipython:: python
+.. jupyter-execute::
 
     dt = xr.DataTree(
         dataset=xr.Dataset({"foo": 0, "bar": 1}),
@@ -268,10 +282,10 @@ Attribute-like access
 
 You can also select both variables and child nodes through dot indexing
 
-.. ipython:: python
+.. jupyter-execute::
 
-    dt.foo
-    dt.a
+    print(dt.foo)
+    print(dt.a)
 
 .. _filesystem paths:
 
@@ -295,10 +309,10 @@ This is an extension of the conventional
 
 Like with filepaths, paths within the tree can either be relative to the current node, e.g.
 
-.. ipython:: python
+.. jupyter-execute::
 
-    abe["Homer/Bart"].name
-    abe["./Homer/Bart"].name  # alternative syntax
+    print(abe["Homer/Bart"].name)
+    print(abe["./Homer/Bart"].name)  # alternative syntax
 
 or relative to the root node.
 A path specified from the root (as opposed to being specified relative to an arbitrary node in the tree) is sometimes also referred to as a
@@ -306,25 +320,25 @@ A path specified from the root (as oppos
 or as an "absolute path".
 The root node is referred to by ``"/"``, so the path from the root node to its grand-child would be ``"/child/grandchild"``, e.g.
 
-.. ipython:: python
+.. jupyter-execute::
 
     # access lisa's sibling by a relative path.
-    lisa["../Bart"]
+    print(lisa["../Bart"])
     # or from absolute path
-    lisa["/Homer/Bart"]
+    print(lisa["/Homer/Bart"])
 
 
 Relative paths between nodes also support the ``"../"`` syntax to mean the parent of the current node.
 We can use this with ``__setitem__`` to add a missing entry to our evolutionary tree, but add it relative to a more familiar node of interest:
 
-.. ipython:: python
+.. jupyter-execute::
 
     primates["../../Two Fenestrae/Crocodiles"] = xr.DataTree()
     print(vertebrates)
 
 Given two nodes in a tree, we can also find their relative path:
 
-.. ipython:: python
+.. jupyter-execute::
 
     bart.relative_to(lisa)
 
@@ -332,7 +346,7 @@ You can use this filepath feature to bui
 If we have a dictionary where each key is a valid path, and each value is either valid data or ``None``,
 we can construct a complex tree quickly using the alternative constructor :py:meth:`~xarray.DataTree.from_dict()`:
 
-.. ipython:: python
+.. jupyter-execute::
 
     d = {
         "/": xr.Dataset({"foo": "orange"}),
@@ -341,7 +355,7 @@ we can construct a complex tree quickly
         "a/c/d": None,
     }
     dt = xr.DataTree.from_dict(d)
-    dt
+    print(dt)
 
 .. note::
 
@@ -357,7 +371,7 @@ Iterating over trees
 You can iterate over every node in a tree using the subtree :py:class:`~xarray.DataTree.subtree` property.
 This returns an iterable of nodes, which yields them in depth-first order.
 
-.. ipython:: python
+.. jupyter-execute::
 
     for node in vertebrates.subtree:
         print(node.path)
@@ -372,12 +386,12 @@ For example, we could keep only the node
 checking if they contain any data using :py:class:`~xarray.DataTree.has_data`,
 then rebuilding a new tree using only the paths of those nodes:
 
-.. ipython:: python
+.. jupyter-execute::
 
     non_empty_nodes = {
         path: node.dataset for path, node in dt.subtree_with_keys if node.has_data
     }
-    xr.DataTree.from_dict(non_empty_nodes)
+    print(xr.DataTree.from_dict(non_empty_nodes))
 
 You can see this tree is similar to the ``dt`` object above, except that it is missing the empty nodes ``a/c`` and ``a/c/d``.
 
@@ -396,7 +410,7 @@ We can subset our tree to select only no
 Similarly to on a real filesystem, matching nodes by common patterns in their paths is often useful.
 We can use :py:meth:`xarray.DataTree.match` for this:
 
-.. ipython:: python
+.. jupyter-execute::
 
     dt = xr.DataTree.from_dict(
         {
@@ -407,14 +421,14 @@ We can use :py:meth:`xarray.DataTree.mat
         }
     )
     result = dt.match("*/B")
-    result
+    print(result)
 
 We can also subset trees by the contents of the nodes.
 :py:meth:`xarray.DataTree.filter` retains only the nodes of a tree that meet a certain condition.
 For example, we could recreate the Simpsons' family tree with the ages of each individual, then filter for only the adults.
 First let's recreate the tree but with an ``age`` data variable in every node:
 
-.. ipython:: python
+.. jupyter-execute::
 
     simpsons = xr.DataTree.from_dict(
         {
@@ -427,18 +441,20 @@ First lets recreate the tree but with an
         },
         name="Abe",
     )
-    simpsons
+    print(simpsons)
 
 Now let's filter out the minors:
 
-.. ipython:: python
+.. jupyter-execute::
 
-    simpsons.filter(lambda node: node["age"] > 18)
+    print(simpsons.filter(lambda node: node["age"] > 18))
 
 The result is a new tree, containing only the nodes matching the condition.
 
 (Yes, under the hood :py:meth:`~xarray.DataTree.filter` is just syntactic sugar for the pattern we showed you in :ref:`iterating over trees` !)
 
+If you want to filter out empty nodes you can use :py:meth:`~xarray.DataTree.prune`.
+
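+For example (an illustrative sketch, assuming ``prune()`` is called with its
+default arguments to drop nodes that hold no data):
+
+.. jupyter-execute::
+
+    # Build a small tree containing an empty node, then prune it away.
+    sparse_tree = xr.DataTree.from_dict(
+        {"/": xr.Dataset({"foo": 0}), "/empty": None, "/child": xr.Dataset({"bar": 1})}
+    )
+    print(sparse_tree.prune())
+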
 .. _Tree Contents:
 
 Tree Contents
@@ -454,7 +470,7 @@ You can check if a tree is a hollow tree
 We can see that the Simpson's family is not hollow because the data variable ``"age"`` is present at some nodes which
 have children (i.e. Abe and Homer).
 
-.. ipython:: python
+.. jupyter-execute::
 
     simpsons.is_hollow
 
@@ -471,7 +487,7 @@ Operations and Methods on Trees
 To show how applying operations across a whole tree at once can be useful,
 let's first create an example scientific dataset.
 
-.. ipython:: python
+.. jupyter-execute::
 
     def time_stamps(n_samples, T):
         """Create an array of evenly-spaced time stamps"""
@@ -518,22 +534,22 @@ let's first create a example scientific
             ),
         }
     )
-    voltages
+    print(voltages)
 
 Most xarray computation methods also exist as methods on datatree objects,
 so you can for example take the mean value of these two timeseries at once:
 
-.. ipython:: python
+.. jupyter-execute::
 
-    voltages.mean(dim="time")
+    print(voltages.mean(dim="time"))
 
 This works by mapping the standard :py:meth:`xarray.Dataset.mean()` method over the dataset stored in each node of the
 tree one-by-one.
 
 The arguments passed to the method are used for every node, so the values of the arguments you pass might be valid for one node and invalid for another:
 
-.. ipython:: python
-    :okexcept:
+.. jupyter-execute::
+    :raises:
 
     voltages.isel(time=12)
 
@@ -545,9 +561,9 @@ Arithmetic Methods on Trees
 Arithmetic methods are also implemented, so you can e.g. add a scalar to every dataset in the tree at once.
 For example, we can advance the timeline of the Simpsons by a decade just by
 
-.. ipython:: python
+.. jupyter-execute::
 
-    simpsons + 10
+    print(simpsons + 10)
 
 See that the same change (fast-forwarding by adding 10 years to the age of each character) has been applied to every node.
 
@@ -565,16 +581,16 @@ and returns one (or more) xarray dataset
 
 For example, we can define a function to calculate the Root Mean Square of a timeseries
 
-.. ipython:: python
+.. jupyter-execute::
 
     def rms(signal):
         return np.sqrt(np.mean(signal**2))
 
 Then calculate the RMS value of these signals:
 
-.. ipython:: python
+.. jupyter-execute::
 
-    voltages.map_over_datasets(rms)
+    print(voltages.map_over_datasets(rms))
 
 .. _multiple trees:
 
@@ -595,7 +611,7 @@ To iterate over the corresponding nodes
 :py:class:`~xarray.DataTree.subtree_with_keys`. This combines well with
 :py:meth:`xarray.DataTree.from_dict()` to build a new tree:
 
-.. ipython:: python
+.. jupyter-execute::
 
     dt1 = xr.DataTree.from_dict({"a": xr.Dataset({"x": 1}), "b": xr.Dataset({"x": 2})})
     dt2 = xr.DataTree.from_dict(
@@ -604,14 +620,16 @@ To iterate over the corresponding nodes
     result = {}
     for path, (node1, node2) in xr.group_subtrees(dt1, dt2):
         result[path] = node1.dataset + node2.dataset
-    xr.DataTree.from_dict(result)
+    dt3 = xr.DataTree.from_dict(result)
+    print(dt3)
 
 Alternatively, you can apply a function directly to paired datasets at every node
 using :py:func:`xarray.map_over_datasets`:
 
-.. ipython:: python
+.. jupyter-execute::
 
-    xr.map_over_datasets(lambda x, y: x + y, dt1, dt2)
+    dt3 = xr.map_over_datasets(lambda x, y: x + y, dt1, dt2)
+    print(dt3)
 
 Comparing Trees for Isomorphism
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -623,8 +641,8 @@ or "isomorphic", if the full paths to al
 Applying :py:func:`~xarray.group_subtrees` to trees with different structures
 raises :py:class:`~xarray.TreeIsomorphismError`:
 
-.. ipython:: python
-    :okexcept:
+.. jupyter-execute::
+    :raises:
 
     tree = xr.DataTree.from_dict({"a": None, "a/b": None, "a/c": None})
     simple_tree = xr.DataTree.from_dict({"a": None})
@@ -633,20 +651,20 @@ raises :py:class:`~xarray.TreeIsomorphis
 
 We can explicitly also check if any two trees are isomorphic using the :py:meth:`~xarray.DataTree.isomorphic` method:
 
-.. ipython:: python
+.. jupyter-execute::
 
     tree.isomorphic(simple_tree)
 
 Corresponding tree nodes do not need to have the same data in order to be considered isomorphic:
 
-.. ipython:: python
+.. jupyter-execute::
 
     tree_with_data = xr.DataTree.from_dict({"a": xr.Dataset({"foo": 1})})
     simple_tree.isomorphic(tree_with_data)
 
 They also do not need to define child nodes in the same order:
 
-.. ipython:: python
+.. jupyter-execute::
 
     reordered_tree = xr.DataTree.from_dict({"a": None, "a/c": None, "a/b": None})
     tree.isomorphic(reordered_tree)
@@ -657,7 +675,7 @@ Arithmetic Between Multiple Trees
 Arithmetic operations like multiplication are binary operations, so as long as we have two isomorphic trees,
 we can do arithmetic between them.
 
-.. ipython:: python
+.. jupyter-execute::
 
     currents = xr.DataTree.from_dict(
         {
@@ -681,16 +699,18 @@ we can do arithmetic between them.
             ),
         }
     )
-    currents
+    print(currents)
+
+.. jupyter-execute::
 
     currents.isomorphic(voltages)
 
 We could use this feature to quickly calculate the electrical power in our signal, P=IV.
 
-.. ipython:: python
+.. jupyter-execute::
 
     power = currents * voltages
-    power
+    print(power)
 
 .. _hierarchical-data.alignment-and-coordinate-inheritance:
 
@@ -712,7 +732,7 @@ Exact alignment means that shared dimens
 
 To demonstrate, let's first generate some example datasets which are not aligned with one another:
 
-.. ipython:: python
+.. jupyter-execute::
 
     # (drop the attributes just to make the printed representation shorter)
     ds = xr.tutorial.open_dataset("air_temperature").drop_attrs()
@@ -723,24 +743,24 @@ To demonstrate, let's first generate som
 
 These datasets have different lengths along the ``time`` dimension, and are therefore not aligned along that dimension.
 
-.. ipython:: python
+.. jupyter-execute::
 
-    ds_daily.sizes
-    ds_weekly.sizes
-    ds_monthly.sizes
+    print(ds_daily.sizes)
+    print(ds_weekly.sizes)
+    print(ds_monthly.sizes)
 
 We cannot store these non-alignable variables on a single :py:class:`~xarray.Dataset` object, because they do not exactly align:
 
-.. ipython:: python
-    :okexcept:
+.. jupyter-execute::
+    :raises:
 
     xr.align(ds_daily, ds_weekly, ds_monthly, join="exact")
 
 But we :ref:`previously said <why>` that multi-resolution data is a good use case for :py:class:`~xarray.DataTree`, so surely we should be able to store these in a single :py:class:`~xarray.DataTree`?
 If we first try to create a :py:class:`~xarray.DataTree` with these different-length time dimensions present in both parents and children, we will still get an alignment error:
 
-.. ipython:: python
-    :okexcept:
+.. jupyter-execute::
+    :raises:
 
     xr.DataTree.from_dict({"daily": ds_daily, "daily/weekly": ds_weekly})
 
@@ -757,27 +777,29 @@ This alignment check is performed up thr
 
 To represent our unalignable data in a single :py:class:`~xarray.DataTree`, we must instead place all variables that are a function of these different-length dimensions into nodes that are not direct descendants of one another, e.g. organize them as siblings.
 
-.. ipython:: python
+.. jupyter-execute::
 
     dt = xr.DataTree.from_dict(
         {"daily": ds_daily, "weekly": ds_weekly, "monthly": ds_monthly}
     )
-    dt
+    print(dt)
 
 Now we have a valid :py:class:`~xarray.DataTree` structure which contains all the data at each different time frequency, stored in a separate group.
 
 This is a useful way to organise our data because we can still operate on all the groups at once.
 For example we can extract all three timeseries at a specific lat-lon location:
 
-.. ipython:: python
+.. jupyter-execute::
 
-    dt.sel(lat=75, lon=300)
+    dt_sel = dt.sel(lat=75, lon=300)
+    print(dt_sel)
 
 or compute the standard deviation of each timeseries to find out how it varies with sampling frequency:
 
-.. ipython:: python
+.. jupyter-execute::
 
-    dt.std(dim="time")
+    dt_std = dt.std(dim="time")
+    print(dt_std)
 
 .. _coordinate-inheritance:
 
@@ -786,7 +808,7 @@ Coordinate Inheritance
 
 Notice that in the trees we constructed above there is some redundancy - the ``lat`` and ``lon`` variables appear in each sibling group, but are identical across the groups.
 
-.. ipython:: python
+.. jupyter-execute::
 
     dt
 
@@ -797,7 +819,7 @@ We can use "Coordinate Inheritance" to d
 
 Let's instead place only the time-dependent variables in the child groups, and put the non-time-dependent ``lat`` and ``lon`` variables in the parent (root) group:
 
-.. ipython:: python
+.. jupyter-execute::
 
     dt = xr.DataTree.from_dict(
         {
@@ -814,25 +836,30 @@ Defining the common coordinates just onc
 
 We can still access the coordinates defined in the parent groups from any of the child groups as if they were actually present on the child groups:
 
-.. ipython:: python
+.. jupyter-execute::
 
     dt.daily.coords
+
+.. jupyter-execute::
+
     dt["daily/lat"]
 
 As we can still access them, we say that the ``lat`` and ``lon`` coordinates in the child groups have been "inherited" from their common parent group.
 
 If we print just one of the child nodes, it will still display inherited coordinates, but explicitly mark them as such:
 
-.. ipython:: python
+.. jupyter-execute::
 
-    print(dt["/daily"])
+    dt["/daily"]
 
 This helps to differentiate which variables are defined on the datatree node that you are currently looking at, and which were defined somewhere above it.
 
 We can also still perform all the same operations on the whole tree:
 
-.. ipython:: python
+.. jupyter-execute::
 
     dt.sel(lat=[75], lon=[300])
 
+.. jupyter-execute::
+
     dt.std(dim="time")
diff -pruN 2025.03.1-8/doc/user-guide/index.rst 2025.10.1-1/doc/user-guide/index.rst
--- 2025.03.1-8/doc/user-guide/index.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/user-guide/index.rst	2025-10-10 10:38:05.000000000 +0000
@@ -3,28 +3,63 @@ User Guide
 ###########
 
 In this user guide, you will find detailed descriptions and
-examples that describe many common tasks that you can accomplish with xarray.
+examples that describe many common tasks that you can accomplish with Xarray.
 
 
 .. toctree::
    :maxdepth: 2
-   :hidden:
+   :caption: Data model
 
    terminology
    data-structures
+   hierarchical-data
+   dask
+
+
+.. toctree::
+   :maxdepth: 2
+   :caption: Core operations
+
    indexing
-   interpolation
+   combining
+   reshaping
    computation
    groupby
-   reshaping
-   combining
-   time-series
-   weather-climate
-   pandas
+   interpolation
+
+.. toctree::
+   :maxdepth: 2
+   :caption: I/O
+
    io
-   dask
+   complex-numbers
+
+.. toctree::
+   :maxdepth: 2
+   :caption: Visualization
+
    plotting
+
+
+.. toctree::
+   :maxdepth: 2
+   :caption: Interoperability
+
+   pandas
+   duckarrays
+   ecosystem
+
+
+.. toctree::
+   :maxdepth: 2
+   :caption: Domain-specific workflows
+
+   time-series
+   weather-climate
+
+.. toctree::
+   :maxdepth: 2
+   :caption: Options and Testing
+
    options
    testing
-   duckarrays
-   hierarchical-data
diff -pruN 2025.03.1-8/doc/user-guide/indexing.rst 2025.10.1-1/doc/user-guide/indexing.rst
--- 2025.03.1-8/doc/user-guide/indexing.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/user-guide/indexing.rst	2025-10-10 10:38:05.000000000 +0000
@@ -3,8 +3,9 @@
 Indexing and selecting data
 ===========================
 
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
+    :hide-output:
 
     import numpy as np
     import pandas as pd
@@ -12,6 +13,8 @@ Indexing and selecting data
 
     np.random.seed(123456)
 
+    %xmode minimal
+
 Xarray offers extremely flexible indexing routines that combine the best
 features of NumPy and pandas for data selection.
 
@@ -62,7 +65,7 @@ Indexing a :py:class:`~xarray.DataArray`
 does for numpy arrays, except that the returned object is always another
 DataArray:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da = xr.DataArray(
         np.random.rand(4, 3),
@@ -72,7 +75,13 @@ DataArray:
         ],
     )
     da[:2]
+
+.. jupyter-execute::
+
     da[0, 0]
+
+.. jupyter-execute::
+
     da[:, [2, 1]]
 
 Attributes are persisted in all indexing operations.
@@ -87,7 +96,7 @@ Xarray also supports label-based indexin
 we use a :py:class:`pandas.Index` under the hood, label based indexing is very
 fast. To do label based indexing, use the :py:attr:`~xarray.DataArray.loc` attribute:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da.loc["2000-01-01":"2000-01-02", "IA"]
 
@@ -104,7 +113,7 @@ __ https://pandas.pydata.org/pandas-docs
 
 Setting values with label based indexing is also supported:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da.loc["2000-01-01", ["IL", "IN"]] = -10
     da
@@ -119,22 +128,26 @@ use them explicitly to slice data. There
 1. Use the :py:meth:`~xarray.DataArray.sel` and :py:meth:`~xarray.DataArray.isel`
    convenience methods:
 
-    .. ipython:: python
+    .. jupyter-execute::
 
         # index by integer array indices
         da.isel(space=0, time=slice(None, 2))
 
+    .. jupyter-execute::
+
         # index by dimension coordinate labels
         da.sel(time=slice("2000-01-01", "2000-01-02"))
 
 2. Use a dictionary as the argument for array positional or label based array
    indexing:
 
-    .. ipython:: python
+    .. jupyter-execute::
 
         # index by integer array indices
         da[dict(space=0, time=slice(None, 2))]
 
+    .. jupyter-execute::
+
         # index by dimension coordinate labels
         da.loc[dict(time=slice("2000-01-01", "2000-01-02"))]
 
@@ -163,40 +176,45 @@ support ``method`` and ``tolerance`` key
 enabling nearest neighbor (inexact) lookups by use of the methods ``'pad'``,
 ``'backfill'`` or ``'nearest'``:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da = xr.DataArray([1, 2, 3], [("x", [0, 1, 2])])
     da.sel(x=[1.1, 1.9], method="nearest")
+
+.. jupyter-execute::
+
     da.sel(x=0.1, method="backfill")
+
+.. jupyter-execute::
+
     da.reindex(x=[0.5, 1, 1.5, 2, 2.5], method="pad")
 
 Tolerance limits the maximum distance for valid matches with an inexact lookup:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da.reindex(x=[1.1, 1.5], method="nearest", tolerance=0.2)
 
 The method parameter is not yet supported if any of the arguments
 to ``.sel()`` is a ``slice`` object:
 
-.. ipython::
-   :verbatim:
+.. jupyter-execute::
+   :raises:
 
-   In [1]: da.sel(x=slice(1, 3), method="nearest")
-   NotImplementedError
+   da.sel(x=slice(1, 3), method="nearest")
 
 However, you don't need to use ``method`` to do inexact slicing. Slicing
 already returns all values inside the range (inclusive), as long as the index
 labels are monotonic increasing:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da.sel(x=slice(0.9, 3.1))
 
 Indexing axes with monotonic decreasing labels also works, as long as the
 ``slice`` or ``.loc`` arguments are also decreasing:
 
-.. ipython:: python
+.. jupyter-execute::
 
     reversed_da = da[::-1]
     reversed_da.loc[3.1:0.9]
@@ -216,7 +234,7 @@ Dataset indexing
 We can also use these methods to index all variables in a dataset
 simultaneously, returning a new dataset:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da = xr.DataArray(
         np.random.rand(4, 3),
@@ -227,15 +245,21 @@ simultaneously, returning a new dataset:
     )
     ds = da.to_dataset(name="foo")
     ds.isel(space=[0], time=[0])
+
+.. jupyter-execute::
+
     ds.sel(time="2000-01-01")
 
 Positional indexing on a dataset is not supported because the ordering of
 dimensions in a dataset is somewhat ambiguous (it can vary between different
 arrays). However, you can do normal indexing with dimension names:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds[dict(space=[0], time=[0])]
+
+.. jupyter-execute::
+
     ds.loc[dict(time="2000-01-01")]
 
 Dropping labels and dimensions
@@ -244,7 +268,7 @@ Dropping labels and dimensions
 The :py:meth:`~xarray.Dataset.drop_sel` method returns a new object with the listed
 index labels along a dimension dropped:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.drop_sel(space=["IN", "IL"])
 
@@ -253,7 +277,7 @@ index labels along a dimension dropped:
 Use :py:meth:`~xarray.Dataset.drop_dims` to drop a full dimension from a Dataset.
 Any variables with these dimensions are also dropped:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.drop_dims("time")
 
@@ -267,7 +291,7 @@ However, it is sometimes useful to selec
 original data, but with some elements masked. To do this type of selection in
 xarray, use :py:meth:`~xarray.DataArray.where`:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da = xr.DataArray(np.arange(16).reshape(4, 4), dims=["x", "y"])
     da.where(da.x + da.y < 4)
@@ -276,9 +300,9 @@ This is particularly useful for ragged i
 e.g., to apply a 2D mask to an image. Note that ``where`` follows all the
 usual xarray broadcasting and alignment rules for binary operations (e.g.,
 ``+``) between the object being indexed and the condition, as described in
-:ref:`comput`:
+:ref:`compute`:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da.where(da.y < 2)
 
@@ -287,7 +311,7 @@ where the selected data size is much sma
 use of the option ``drop=True`` clips coordinate
 elements that are fully masked:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da.where(da.y < 2, drop=True)
 
@@ -300,7 +324,7 @@ To check whether elements of an xarray o
 compare with the equality operator ``==`` (e.g., ``arr == 3``). To check
 multiple values, use :py:meth:`~xarray.DataArray.isin`:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da = xr.DataArray([1, 2, 3, 4, 5], dims=["x"])
     da.isin([2, 4])
@@ -309,7 +333,7 @@ multiple values, use :py:meth:`~xarray.D
 :py:meth:`~xarray.DataArray.where` to support indexing by arrays that are not
 already labels of an array:
 
-.. ipython:: python
+.. jupyter-execute::
 
     lookup = xr.DataArray([-1, -2, -3, -4, -5], dims=["x"])
     da.where(lookup.isin([-2, -4]), drop=True)
@@ -332,7 +356,7 @@ understood as orthogonally. Each indexer
 the corresponding dimension, similar to how vector indexing works in Fortran or
 MATLAB, or after using the :py:func:`numpy.ix_` helper:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da = xr.DataArray(
         np.arange(12).reshape((3, 4)),
@@ -340,6 +364,9 @@ MATLAB, or after using the :py:func:`num
         coords={"x": [0, 1, 2], "y": ["a", "b", "c", "d"]},
     )
     da
+
+.. jupyter-execute::
+
     da[[0, 2, 2], [1, 3]]
 
 For more flexibility, you can supply :py:meth:`~xarray.DataArray` objects
@@ -347,7 +374,7 @@ as indexers.
 Dimensions on resultant arrays are given by the ordered union of the indexers'
 dimensions:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ind_x = xr.DataArray([0, 1], dims=["x"])
     ind_y = xr.DataArray([0, 1], dims=["y"])
@@ -356,7 +383,7 @@ dimensions:
 Slices or sequences/arrays without named dimensions are treated as if they have
 the same dimension as the one being indexed along:
 
-.. ipython:: python
+.. jupyter-execute::
 
     # Because [0, 1] is used to index along dimension 'x',
     # it is assumed to have dimension 'x'
@@ -366,7 +393,7 @@ Furthermore, you can use multi-dimension
 as indexers, where the resultant array dimension is also determined by
 indexers' dimension:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ind = xr.DataArray([[0, 1], [0, 1]], dims=["a", "b"])
     da[ind]
@@ -380,17 +407,19 @@ See :ref:`indexing.rules` for the comple
 
 Vectorized indexing also works with ``isel``, ``loc``, and ``sel``:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ind = xr.DataArray([[0, 1], [0, 1]], dims=["a", "b"])
     da.isel(y=ind)  # same as da[:, ind]
 
+.. jupyter-execute::
+
     ind = xr.DataArray([["a", "b"], ["b", "a"]], dims=["a", "b"])
     da.loc[:, ind]  # same as da.sel(y=ind)
 
 These methods may also be applied to ``Dataset`` objects
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds = da.to_dataset(name="bar")
     ds.isel(x=xr.DataArray([0, 1, 2], dims=["points"]))
@@ -405,7 +434,7 @@ of the closest latitude and longitude ar
 dimension named "points":
 
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds = xr.tutorial.open_dataset("air_temperature")
 
@@ -440,7 +469,7 @@ Assigning values with indexing
 To select and assign values to a portion of a :py:meth:`~xarray.DataArray` you
 can use indexing with ``.loc`` :
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds = xr.tutorial.open_dataset("air_temperature")
 
@@ -459,7 +488,7 @@ can use indexing with ``.loc`` :
 
 or :py:meth:`~xarray.where`:
 
-.. ipython:: python
+.. jupyter-execute::
 
     # modify one grid point using xr.where()
     ds["empty"] = xr.where(
@@ -479,7 +508,7 @@ or :py:meth:`~xarray.where`:
 
 Vectorized indexing can also be used to assign values to xarray objects.
 
-.. ipython:: python
+.. jupyter-execute::
 
     da = xr.DataArray(
         np.arange(12).reshape((3, 4)),
@@ -487,20 +516,27 @@ Vectorized indexing can also be used to
         coords={"x": [0, 1, 2], "y": ["a", "b", "c", "d"]},
     )
     da
+
+.. jupyter-execute::
+
     da[0] = -1  # assignment with broadcasting
     da
 
+.. jupyter-execute::
+
     ind_x = xr.DataArray([0, 1], dims=["x"])
     ind_y = xr.DataArray([0, 1], dims=["y"])
     da[ind_x, ind_y] = -2  # assign -2 to (ix, iy) = (0, 0) and (1, 1)
     da
 
+.. jupyter-execute::
+
     da[ind_x, ind_y] += 100  # increment is also possible
     da
 
 Like ``numpy.ndarray``, value assignment sometimes works differently from what one may expect.
 
-.. ipython:: python
+.. jupyter-execute::
 
     da = xr.DataArray([0, 1, 2, 3], dims=["x"])
     ind = xr.DataArray([0, 0, 0], dims=["x"])
@@ -539,7 +575,7 @@ __ https://numpy.org/doc/stable/user/bas
 
   Assigning values with chained indexing using ``.sel`` or ``.isel`` fails silently.
 
-  .. ipython:: python
+  .. jupyter-execute::
 
       da = xr.DataArray([0, 1, 2, 3], dims=["x"])
       # DO NOT do this
@@ -548,8 +584,8 @@ __ https://numpy.org/doc/stable/user/bas
 
 You can also assign values to all variables of a :py:class:`Dataset` at once:
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
+    :stderr:
 
     ds_org = xr.tutorial.open_dataset("eraint_uvz").isel(
         latitude=slice(56, 59), longitude=slice(255, 258), level=0
@@ -558,18 +594,30 @@ You can also assign values to all variab
     ds = xr.zeros_like(ds_org)
     ds
 
+.. jupyter-execute::
+
     # by integer
     ds[dict(latitude=2, longitude=2)] = 1
     ds["u"]
+
+.. jupyter-execute::
+
     ds["v"]
 
+.. jupyter-execute::
+
     # by label
     ds.loc[dict(latitude=47.25, longitude=[11.25, 12])] = 100
     ds["u"]
 
+.. jupyter-execute::
+
     # dataset as new values
     new_dat = ds_org.loc[dict(latitude=48, longitude=[11.25, 12])]
     new_dat
+
+.. jupyter-execute::
+
     ds.loc[dict(latitude=47.25, longitude=[11.25, 12])] = new_dat
     ds["u"]
 
@@ -584,10 +632,13 @@ More advanced indexing
 The use of :py:meth:`~xarray.DataArray` objects as indexers enables very
 flexible indexing. The following is an example of the pointwise indexing:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da = xr.DataArray(np.arange(56).reshape((7, 8)), dims=["x", "y"])
     da
+
+.. jupyter-execute::
+
     da.isel(x=xr.DataArray([0, 1, 6], dims="z"), y=xr.DataArray([0, 1, 0], dims="z"))
 
 
@@ -597,7 +648,7 @@ and mapped along a new dimension ``z``.
 If you want to add a coordinate to the new dimension ``z``,
 you can supply a :py:class:`~xarray.DataArray` with a coordinate,
 
-.. ipython:: python
+.. jupyter-execute::
 
     da.isel(
         x=xr.DataArray([0, 1, 6], dims="z", coords={"z": ["a", "b", "c"]}),
@@ -607,7 +658,7 @@ you can supply a :py:class:`~xarray.Data
 Analogously, label-based pointwise-indexing is also possible by the ``.sel``
 method:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da = xr.DataArray(
         np.random.rand(4, 3),
@@ -638,14 +689,14 @@ useful for greater control and for incre
 
 To reindex a particular dimension, use :py:meth:`~xarray.DataArray.reindex`:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da.reindex(space=["IA", "CA"])
 
 The :py:meth:`~xarray.DataArray.reindex_like` method is a useful shortcut.
 To demonstrate, we will make a subset DataArray with new values:
 
-.. ipython:: python
+.. jupyter-execute::
 
     foo = da.rename("foo")
     baz = (10 * da[:2, :2]).rename("baz")
@@ -654,32 +705,41 @@ To demonstrate, we will make a subset Da
 Reindexing ``foo`` with ``baz`` selects out the first two values along each
 dimension:
 
-.. ipython:: python
+.. jupyter-execute::
 
     foo.reindex_like(baz)
 
 The opposite operation asks us to reindex to a larger shape, so we fill in
 the missing values with ``NaN``:
 
-.. ipython:: python
+.. jupyter-execute::
 
     baz.reindex_like(foo)
 
 The :py:func:`~xarray.align` function lets us perform more flexible database-like
 ``'inner'``, ``'outer'``, ``'left'`` and ``'right'`` joins:
 
-.. ipython:: python
+.. jupyter-execute::
 
     xr.align(foo, baz, join="inner")
+
+.. jupyter-execute::
+
     xr.align(foo, baz, join="outer")
 
 Both ``reindex_like`` and ``align`` work interchangeably between
 :py:class:`~xarray.DataArray` and :py:class:`~xarray.Dataset` objects, and with any number of matching dimension names:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds
+
+.. jupyter-execute::
+
     ds.reindex_like(baz)
+
+.. jupyter-execute::
+
     other = xr.DataArray(["a", "b", "c"], dims="other")
     # this is a no-op, because there are no shared dimension names
     ds.reindex_like(other)
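
 A quick aside: :py:meth:`~xarray.DataArray.reindex_like` and :py:func:`~xarray.align`
 also accept a ``fill_value`` argument that controls what newly introduced labels are
 filled with; a minimal sketch (the fill value ``0`` is purely illustrative):

 .. code:: python

     # fill newly introduced labels with 0 instead of NaN
     baz.reindex_like(foo, fill_value=0)

     # the same option is available for align
     xr.align(foo, baz, join="outer", fill_value=0)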
@@ -693,7 +753,7 @@ Coordinate labels for each dimension are
 based indexing with ``.sel`` and ``.loc`` uses standard positional,
 integer-based indexing as a fallback for dimensions without a coordinate label:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da = xr.DataArray([1, 2, 3], dims="x")
     da.sel(x=[0, -1])
@@ -702,11 +762,10 @@ Alignment between xarray objects where o
 succeeds only if all dimensions of the same name have the same length.
 Otherwise, it raises an informative error:
 
-.. ipython::
-    :verbatim:
+.. jupyter-execute::
+    :raises:
 
-    In [62]: xr.align(da, da[:2])
-    ValueError: arguments without labels along dimension 'x' cannot be aligned because they have different dimension sizes: {2, 3}
+    xr.align(da, da[:2])
 
 Underlying Indexes
 ------------------
@@ -715,7 +774,7 @@ Xarray uses the :py:class:`pandas.Index`
 operations.  If you need to access the underlying indexes, they are available
 through the :py:attr:`~xarray.DataArray.indexes` attribute.
 
-.. ipython:: python
+.. jupyter-execute::
 
     da = xr.DataArray(
         np.random.rand(4, 3),
@@ -725,17 +784,26 @@ through the :py:attr:`~xarray.DataArray.
         ],
     )
     da
+
+.. jupyter-execute::
+
     da.indexes
+
+.. jupyter-execute::
+
     da.indexes["time"]
 
 Use :py:meth:`~xarray.DataArray.get_index` to get an index for a dimension,
 falling back to a default :py:class:`pandas.RangeIndex` if it has no coordinate
 labels:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da = xr.DataArray([1, 2, 3], dims="x")
     da
+
+.. jupyter-execute::
+
     da.get_index("x")
 
 
@@ -780,30 +848,33 @@ Just like pandas, advanced indexing on m
 i.e., a tuple of slices, labels, list of labels, or any selector allowed by
 pandas:
 
-.. ipython:: python
+.. jupyter-execute::
 
     midx = pd.MultiIndex.from_product([list("abc"), [0, 1]], names=("one", "two"))
     mda = xr.DataArray(np.random.rand(6, 3), [("x", midx), ("y", range(3))])
     mda
+
+.. jupyter-execute::
+
     mda.sel(x=(list("ab"), [0]))
 
 You can also select multiple elements by providing a list of labels or tuples or
 a slice of tuples:
 
-.. ipython:: python
+.. jupyter-execute::
 
     mda.sel(x=[("a", 0), ("b", 1)])
 
 Additionally, xarray supports dictionaries:
 
-.. ipython:: python
+.. jupyter-execute::
 
     mda.sel(x={"one": "a", "two": 0})
 
 For convenience, ``sel`` also accepts multi-index levels directly
 as keyword arguments:
 
-.. ipython:: python
+.. jupyter-execute::
 
     mda.sel(one="a", two=0)
 
@@ -815,7 +886,7 @@ Like pandas, xarray handles partial sele
 As shown below, it also renames the dimension / coordinate when the
 multi-index is reduced to a single index.
 
-.. ipython:: python
+.. jupyter-execute::
 
     mda.loc[{"one": "a"}, ...]
 
diff -pruN 2025.03.1-8/doc/user-guide/interpolation.rst 2025.10.1-1/doc/user-guide/interpolation.rst
--- 2025.03.1-8/doc/user-guide/interpolation.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/user-guide/interpolation.rst	2025-10-10 10:38:05.000000000 +0000
@@ -3,12 +3,13 @@
 Interpolating data
 ==================
 
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
 
     import numpy as np
     import pandas as pd
     import xarray as xr
+    import matplotlib.pyplot as plt
 
     np.random.seed(123456)
 
@@ -26,7 +27,7 @@ Scalar and 1-dimensional interpolation
 Interpolating a :py:class:`~xarray.DataArray` works mostly like labeled
 indexing of a :py:class:`~xarray.DataArray`,
 
-.. ipython:: python
+.. jupyter-execute::
 
     da = xr.DataArray(
         np.sin(0.3 * np.arange(12).reshape(4, 3)),
@@ -35,6 +36,8 @@ indexing of a :py:class:`~xarray.DataArr
     # label lookup
     da.sel(time=3)
 
+.. jupyter-execute::
+
     # interpolation
     da.interp(time=2.5)
 
@@ -42,17 +45,19 @@ indexing of a :py:class:`~xarray.DataArr
 Similar to the indexing, :py:meth:`~xarray.DataArray.interp` also accepts an
 array-like, which gives the interpolated result as an array.
 
-.. ipython:: python
+.. jupyter-execute::
 
     # label lookup
     da.sel(time=[2, 3])
 
+.. jupyter-execute::
+
     # interpolation
     da.interp(time=[2.5, 3.5])
 
 To interpolate data with a :py:doc:`numpy.datetime64 <numpy:reference/arrays.datetime>` coordinate you can pass a string.
 
-.. ipython:: python
+.. jupyter-execute::
 
     da_dt64 = xr.DataArray(
         [1, 3], [("time", pd.date_range("1/1/2000", "1/3/2000", periods=2))]
@@ -62,7 +67,7 @@ To interpolate data with a :py:doc:`nump
 The interpolated data can be merged into the original :py:class:`~xarray.DataArray`
 by specifying the time periods required.
 
-.. ipython:: python
+.. jupyter-execute::
 
     da_dt64.interp(time=pd.date_range("1/1/2000", "1/3/2000", periods=3))
 
@@ -84,21 +89,25 @@ Like :py:meth:`~xarray.DataArray.sel`, :
 accepts multiple coordinates. In this case, multidimensional interpolation
 is carried out.
 
-.. ipython:: python
+.. jupyter-execute::
 
     # label lookup
     da.sel(time=2, space=0.1)
 
+.. jupyter-execute::
+
     # interpolation
     da.interp(time=2.5, space=0.15)
 
 Array-like coordinates are also accepted:
 
-.. ipython:: python
+.. jupyter-execute::
 
     # label lookup
     da.sel(time=[2, 3], space=[0.1, 0.2])
 
+.. jupyter-execute::
+
     # interpolation
     da.interp(time=[1.5, 2.5], space=[0.15, 0.25])
 
@@ -109,7 +118,7 @@ object. For example, if we want to compu
 two :py:class:`~xarray.DataArray` s (``da`` and ``other``) staying on slightly
 different coordinates,
 
-.. ipython:: python
+.. jupyter-execute::
 
     other = xr.DataArray(
         np.sin(0.4 * np.arange(9).reshape(3, 3)),
@@ -120,7 +129,7 @@ it might be a good idea to first interpo
 same coordinates of ``other``, and then subtract it.
 :py:meth:`~xarray.DataArray.interp_like` can be used for such a case,
 
-.. ipython:: python
+.. jupyter-execute::
 
     # interpolate da along other's coordinates
     interpolated = da.interp_like(other)
@@ -142,7 +151,7 @@ used.
 
 The interpolation method can be specified by the optional ``method`` argument.
 
-.. ipython:: python
+.. jupyter-execute::
 
     da = xr.DataArray(
         np.sin(np.linspace(0, 2 * np.pi, 10)),
@@ -153,17 +162,22 @@ The interpolation method can be specifie
     da.plot.line("o", label="original")
     da.interp(x=np.linspace(0, 1, 100)).plot.line(label="linear (default)")
     da.interp(x=np.linspace(0, 1, 100), method="cubic").plot.line(label="cubic")
-    @savefig interpolation_sample1.png width=4in
-    plt.legend()
+    plt.legend();
 
 Additional keyword arguments can be passed to scipy's functions.
 
-.. ipython:: python
+.. jupyter-execute::
 
     # fill 0 for the outside of the original coordinates.
     da.interp(x=np.linspace(-0.5, 1.5, 10), kwargs={"fill_value": 0.0})
+
+.. jupyter-execute::
+
     # 1-dimensional extrapolation
     da.interp(x=np.linspace(-0.5, 1.5, 10), kwargs={"fill_value": "extrapolate"})
+
+.. jupyter-execute::
+
     # multi-dimensional extrapolation
     da = xr.DataArray(
         np.sin(0.3 * np.arange(12).reshape(4, 3)),
@@ -194,7 +208,7 @@ a common dimension as new coordinate.
 
 For example:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da = xr.DataArray(
         np.sin(0.3 * np.arange(20).reshape(5, 4)),
@@ -205,6 +219,8 @@ For example:
     y = xr.DataArray([0.1, 0.2, 0.3], dims="z")
     da.sel(x=x, y=y)
 
+.. jupyter-execute::
+
     # advanced interpolation, without extrapolation
     x = xr.DataArray([0.5, 1.5, 2.5, 3.5], dims="z")
     y = xr.DataArray([0.15, 0.25, 0.35, 0.45], dims="z")
@@ -220,7 +236,7 @@ If you want to add a coordinate to the n
 :py:class:`~xarray.DataArray` s with a coordinate. Extrapolation can be achieved
 by passing additional arguments to SciPy's ``interpnd`` function,
 
-.. ipython:: python
+.. jupyter-execute::
 
     x = xr.DataArray([0.5, 1.5, 2.5, 3.5], dims="z", coords={"z": ["a", "b", "c", "d"]})
     y = xr.DataArray(
@@ -242,19 +258,25 @@ the same way that
 ``linear`` and ``nearest`` methods return arrays including NaN,
 while other methods such as ``cubic`` or ``quadratic`` return all NaN arrays.
 
-.. ipython:: python
+.. jupyter-execute::
 
     da = xr.DataArray([0, 2, np.nan, 3, 3.25], dims="x", coords={"x": range(5)})
     da.interp(x=[0.5, 1.5, 2.5])
+
+.. jupyter-execute::
+
     da.interp(x=[0.5, 1.5, 2.5], method="cubic")
 
 To avoid this, you can drop NaN by :py:meth:`~xarray.DataArray.dropna`, and
 then make the interpolation
 
-.. ipython:: python
+.. jupyter-execute::
 
     dropped = da.dropna("x")
     dropped
+
+.. jupyter-execute::
+
     dropped.interp(x=[0.5, 1.5, 2.5], method="cubic")
 
 If NaNs are distributed randomly in your multidimensional array,
@@ -263,7 +285,7 @@ dropping all the columns containing more
 In such a case, you can fill NaN by :py:meth:`~xarray.DataArray.interpolate_na`,
 which is similar to :py:meth:`pandas.Series.interpolate`.
 
-.. ipython:: python
+.. jupyter-execute::
 
     filled = da.interpolate_na(dim="x")
     filled
@@ -271,7 +293,7 @@ which is similar to :py:meth:`pandas.Ser
 This fills NaN by interpolating along the specified dimension.
 After filling NaNs, you can interpolate:
 
-.. ipython:: python
+.. jupyter-execute::
 
     filled.interp(x=[0.5, 1.5, 2.5], method="cubic")
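
 :py:meth:`~xarray.DataArray.interpolate_na` also accepts ``method`` and
 ``max_gap`` arguments to control how gaps are filled; a minimal sketch
 (the values below are illustrative):

 .. code:: python

     # fill only gaps no wider than 2 along the "x" coordinate,
     # using nearest-neighbor filling instead of linear interpolation
     da.interpolate_na(dim="x", method="nearest", max_gap=2)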
 
@@ -284,7 +306,7 @@ Example
 
 Let's see how :py:meth:`~xarray.DataArray.interp` works on real data.
 
-.. ipython:: python
+.. jupyter-execute::
 
     # Raw data
     ds = xr.tutorial.open_dataset("air_temperature").isel(time=0)
@@ -297,14 +319,13 @@ Let's see how :py:meth:`~xarray.DataArra
     new_lat = np.linspace(ds.lat[0].item(), ds.lat[-1].item(), ds.sizes["lat"] * 4)
     dsi = ds.interp(lat=new_lat, lon=new_lon)
     dsi.air.plot(ax=axes[1])
-    @savefig interpolation_sample3.png width=8in
-    axes[1].set_title("Interpolated data")
+    axes[1].set_title("Interpolated data");
 
 Our advanced interpolation can be used to remap the data to the new coordinate.
 Consider the new coordinates x and z on the two dimensional plane.
 The remapping can be done as follows
 
-.. ipython:: python
+.. jupyter-execute::
 
     # new coordinate
     x = np.linspace(240, 300, 100)
@@ -328,5 +349,4 @@ The remapping can be done as follows
 
     dsi = ds.interp(lon=lon, lat=lat)
     dsi.air.plot(ax=axes[1])
-    @savefig interpolation_sample4.png width=8in
-    axes[1].set_title("Remapped data")
+    axes[1].set_title("Remapped data");
diff -pruN 2025.03.1-8/doc/user-guide/io.rst 2025.10.1-1/doc/user-guide/io.rst
--- 2025.03.1-8/doc/user-guide/io.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/user-guide/io.rst	2025-10-10 10:38:05.000000000 +0000
@@ -8,8 +8,8 @@ Xarray supports direct serialization and
 simple :ref:`io.pickle` files to the more flexible :ref:`io.netcdf`
 format (recommended).
 
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
 
     import os
 
@@ -41,37 +41,58 @@ Following the diagram is detailed inform
 You can learn more about using and developing backends in the
 `Xarray tutorial JupyterBook <https://tutorial.xarray.dev/advanced/backends/backends.html>`_.
 
+..
+   _comment: mermaid Flowchart "link" text gets secondary color background, SVG icon fill gets primary color
+
+.. raw:: html
+
+    <style>
+      /* Ensure PST link colors don't override mermaid text colors */
+      .mermaid a {
+        color: white;
+      }
+      .mermaid a:hover {
+        color: magenta;
+        text-decoration-color: magenta;
+      }
+      .mermaid a:visited {
+        color: white;
+        text-decoration-color: white;
+      }
+    </style>
+
 .. mermaid::
+    :config: {"theme":"base","themeVariables":{"fontSize":"20px","primaryColor":"#fff","primaryTextColor":"#fff","primaryBorderColor":"#59c7d6","lineColor":"#e28126","secondaryColor":"#767985"}}
     :alt: Flowchart illustrating how to choose the right backend engine to read your data
 
     flowchart LR
-        built-in-eng["""Is your data stored in one of these formats?
-            - netCDF4 (<code>netcdf4</code>)
-            - netCDF3 (<code>scipy</code>)
-            - Zarr (<code>zarr</code>)
-            - DODS/OPeNDAP (<code>pydap</code>)
-            - HDF5 (<code>h5netcdf</code>)
-            """]
+        built-in-eng["`**Is your data stored in one of these formats?**
+            - netCDF4
+            - netCDF3
+            - Zarr
+            - DODS/OPeNDAP
+            - HDF5
+            `"]
 
-        built-in("""You're in luck! Xarray bundles a backend for this format.
+        built-in("`**You're in luck!** Xarray bundles a backend to automatically read these formats.
             Open data using <code>xr.open_dataset()</code>. We recommend
-            always setting the engine you want to use.""")
+            explicitly setting engine='xxxx' for faster loading.`")
 
-        installed-eng["""One of these formats?
-            - <a href='https://github.com/ecmwf/cfgrib'>GRIB (<code>cfgrib</code>)
-            - <a href='https://tiledb-inc.github.io/TileDB-CF-Py/documentation/index.html'>TileDB (<code>tiledb</code>)
-            - <a href='https://corteva.github.io/rioxarray/stable/getting_started/getting_started.html#rioxarray'>GeoTIFF, JPEG-2000, ESRI-hdf (<code>rioxarray</code>, via GDAL)
-            - <a href='https://www.bopen.eu/xarray-sentinel-open-source-library/'>Sentinel-1 SAFE (<code>xarray-sentinel</code>)
+        installed-eng["""<b>One of these formats?</b>
+            - <a href='https://github.com/ecmwf/cfgrib'>GRIB</a>
+            - <a href='https://tiledb-inc.github.io/TileDB-CF-Py/documentation'>TileDB</a>
+            - <a href='https://corteva.github.io/rioxarray/stable/getting_started/getting_started.html#rioxarray'>GeoTIFF, JPEG-2000, etc. (via GDAL)</a>
+            - <a href='https://www.bopen.eu/xarray-sentinel-open-source-library/'>Sentinel-1 SAFE</a>
             """]
 
-        installed("""Install the package indicated in parentheses to your
-            Python environment. Restart the kernel and use
-            <code>xr.open_dataset(files, engine='rioxarray')</code>.""")
-
-        other("""Ask around to see if someone in your data community
-            has created an Xarray backend for your data type.
-            If not, you may need to create your own or consider
-            exporting your data to a more common format.""")
+        installed("""Install the linked backend library and use it with
+            <code>xr.open_dataset(file, engine='xxxx')</code>.""")
+
+        other["`**Options:**
+            - Look around to see if someone has created an Xarray backend for your format!
+            - <a href='https://docs.xarray.dev/en/stable/internals/how-to-add-new-backend.html'>Create your own backend</a>
+            - Convert your data to a supported format
+            `"]
 
         built-in-eng -->|Yes| built-in
         built-in-eng -->|No| installed-eng
@@ -79,16 +100,16 @@ You can learn more about using and devel
         installed-eng -->|Yes| installed
         installed-eng -->|No| other
 
-        click built-in-eng "https://docs.xarray.dev/en/stable/getting-started-guide/faq.html#how-do-i-open-format-x-file-as-an-xarray-dataset"
-        click other "https://docs.xarray.dev/en/stable/internals/how-to-add-new-backend.html"
+        click built-in-eng "https://docs.xarray.dev/en/stable/get-help/faq.html#how-do-i-open-format-x-file-as-an-xarray-dataset"
 
-        classDef quesNodefmt fill:#9DEEF4,stroke:#206C89,text-align:left
+
+        classDef quesNodefmt font-size:12pt,fill:#0e4666,stroke:#59c7d6,stroke-width:3
         class built-in-eng,installed-eng quesNodefmt
 
-        classDef ansNodefmt fill:#FFAA05,stroke:#E37F17,text-align:left,white-space:nowrap
+        classDef ansNodefmt font-size:12pt,fill:#4a4a4a,stroke:#17afb4,stroke-width:3
         class built-in,installed,other ansNodefmt
 
-        linkStyle default font-size:20pt,color:#206C89
+        linkStyle default font-size:18pt,stroke-width:4
 
 
 .. _io.netcdf:
@@ -125,7 +146,7 @@ __ https://github.com/Unidata/netcdf4-py
 We can save a Dataset to disk using the
 :py:meth:`Dataset.to_netcdf` method:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds = xr.Dataset(
         {"foo": (("x", "y"), np.random.rand(4, 5))},
@@ -153,13 +174,13 @@ the ``format`` and ``engine`` arguments.
 We can load netCDF files to create a new Dataset using
 :py:func:`open_dataset`:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds_disk = xr.open_dataset("saved_on_disk.nc")
     ds_disk
 
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
 
     # Close "saved_on_disk.nc", but retain the file until after closing or deleting other
     # datasets that will refer to it.
@@ -209,7 +230,7 @@ is modified: the original file on disk i
 Datasets have a :py:meth:`Dataset.close` method to close the associated
 netCDF file. However, it's often cleaner to use a ``with`` statement:
 
-.. ipython:: python
+.. jupyter-execute::
 
     # this automatically closes the dataset after use
     with xr.open_dataset("saved_on_disk.nc") as ds:
@@ -283,9 +304,12 @@ You can view this encoding information (
 :py:attr:`DataArray.encoding` and
 :py:attr:`Dataset.encoding` attributes:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds_disk["y"].encoding
+
+.. jupyter-execute::
+
     ds_disk.encoding
 
 Note that all operations that manipulate variables other than indexing
@@ -295,7 +319,7 @@ In some cases it is useful to intentiona
 This can be done with either the :py:meth:`Dataset.drop_encoding` or
 :py:meth:`DataArray.drop_encoding` methods.
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds_no_encoding = ds_disk.drop_encoding()
     ds_no_encoding.encoding
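
 Having dropped the inherited encoding, you are free to choose fresh on-disk
 settings when writing; a small sketch (the file name and compression settings
 below are illustrative):

 .. code:: python

     # write again with newly chosen netCDF4 compression settings
     ds_no_encoding.to_netcdf(
         "recompressed.nc", encoding={"foo": {"zlib": True, "complevel": 4}}
     )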
@@ -567,8 +591,8 @@ The library ``h5netcdf`` allows writing
 allowed in netCDF4 (see
 `h5netcdf documentation <https://github.com/h5netcdf/h5netcdf#invalid-netcdf-files>`_).
 This feature is available through :py:meth:`DataArray.to_netcdf` and
-:py:meth:`Dataset.to_netcdf` when used with ``engine="h5netcdf"``
-and currently raises a warning unless ``invalid_netcdf=True`` is set.
+:py:meth:`Dataset.to_netcdf` when used with ``engine="h5netcdf"``, but only if
+``invalid_netcdf=True`` is explicitly set.
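
 Complex-valued data is a typical example of something that is not valid netCDF
 but can be stored this way; a short sketch (the file name is illustrative):

 .. code:: python

     ds_complex = xr.Dataset({"psi": ("x", np.arange(3) + 1j * np.arange(3))})
     ds_complex.to_netcdf("complex.h5", engine="h5netcdf", invalid_netcdf=True)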
 
 .. warning::
 
@@ -594,7 +618,7 @@ with ``conda install h5netcdf``. Once in
 The similarities between HDF5 and netCDF4 mean that HDF5 data can be written with the
 same :py:meth:`Dataset.to_netcdf` method as used for netCDF4 data:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds = xr.Dataset(
         {"foo": (("x", "y"), np.random.rand(4, 5))},
@@ -655,13 +679,13 @@ To write a dataset with zarr, we use the
 
 To write to a local directory, we pass a path to a directory:
 
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
 
     ! rm -rf path/to/directory.zarr
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
+    :stderr:
 
     ds = xr.Dataset(
         {"foo": (("x", "y"), np.random.rand(4, 5))},
@@ -671,7 +695,7 @@ To write to a local directory, we pass a
             "z": ("x", list("abcd")),
         },
     )
-    ds.to_zarr("path/to/directory.zarr")
+    ds.to_zarr("path/to/directory.zarr", zarr_format=2, consolidated=False)
 
 (The suffix ``.zarr`` is optional--just a reminder that a zarr store lives
 there.) If the directory does not exist, it will be created. If a zarr
@@ -697,10 +721,9 @@ To store variable length strings, conver
 To read back a zarr dataset that has been created this way, we use the
 :py:func:`open_zarr` method:
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    ds_zarr = xr.open_zarr("path/to/directory.zarr")
+    ds_zarr = xr.open_zarr("path/to/directory.zarr", consolidated=False)
     ds_zarr
 
 Cloud Storage Buckets
@@ -729,23 +752,57 @@ key ```storage_options``, part of ``back
 This also works with ``open_mfdataset``, allowing you to pass a list of paths or
 a URL to be interpreted as a glob string.
 
-For writing, you must explicitly set up a ``MutableMapping``
-instance and pass this, as follows:
+For writing, you may either specify a bucket URL or explicitly set up a
+``zarr.abc.store.Store`` instance, as follows:
 
-.. code:: python
+.. tab:: URL
+
+    .. code:: python
 
-    import gcsfs
+        # write to the bucket via GCS URL
+        ds.to_zarr("gs://<bucket/path/to/data.zarr>")
+        # read it back
+        ds_gcs = xr.open_zarr("gs://<bucket/path/to/data.zarr>")
 
-    fs = gcsfs.GCSFileSystem(project="<project-name>", token=None)
-    gcsmap = gcsfs.mapping.GCSMap("<bucket-name>", gcs=fs, check=True, create=False)
-    # write to the bucket
-    ds.to_zarr(store=gcsmap)
-    # read it back
-    ds_gcs = xr.open_zarr(gcsmap)
+.. tab:: fsspec
+
+    .. code:: python
+
+        import gcsfs
+        import zarr
+
+        # manually manage the cloud filesystem connection -- useful, for example,
+        # when you need to manage permissions to cloud resources
+        fs = gcsfs.GCSFileSystem(project="<project-name>", token=None)
+        zstore = zarr.storage.FsspecStore(fs, path="<bucket/path/to/data.zarr>")
+
+        # write to the bucket
+        ds.to_zarr(store=zstore)
+        # read it back
+        ds_gcs = xr.open_zarr(zstore)
+
+.. tab:: obstore
+
+    .. code:: python
+
+        import obstore
+        import zarr
+
+        # alternatively, obstore offers a modern, performant interface for
+        # cloud buckets
+        gcsstore = obstore.store.GCSStore(
+            "<bucket>", prefix="<path/to/data.zarr>", skip_signature=True
+        )
+        zstore = zarr.storage.ObjectStore(gcsstore)
+
+        # write to the bucket
+        ds.to_zarr(store=zstore)
+        # read it back
+        ds_gcs = xr.open_zarr(zstore)
 
-(or use the utility function ``fsspec.get_mapper()``).
 
 .. _fsspec: https://filesystem-spec.readthedocs.io/en/latest/
+.. _obstore: https://developmentseed.org/obstore/latest/
 .. _Zarr: https://zarr.readthedocs.io/
 .. _Amazon S3: https://aws.amazon.com/s3/
 .. _Google Cloud Storage: https://cloud.google.com/storage/
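
 For reading from a public bucket without credentials, ``storage_options`` can
 be passed through to fsspec as well; a hedged sketch (the bucket path is a
 placeholder and assumes ``gcsfs`` is installed):

 .. code:: python

     # anonymous read from a public bucket
     ds_gcs = xr.open_zarr(
         "gs://<bucket/path/to/data.zarr>",
         storage_options={"token": "anon"},
     )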
@@ -767,13 +824,12 @@ without writing all of its array data. T
 ``to_zarr`` with ``compute=False`` to write only metadata (including ``attrs``)
 to Zarr:
 
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
 
     ! rm -rf path/to/directory.zarr
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     import dask.array
 
@@ -783,7 +839,7 @@ to Zarr:
     ds = xr.Dataset({"foo": ("x", dummies)}, coords={"x": np.arange(30)})
     path = "path/to/directory.zarr"
     # Now we write the metadata without computing any array values
-    ds.to_zarr(path, compute=False)
+    ds.to_zarr(path, compute=False, consolidated=False)
 
 Now, a Zarr store with the correct variable shapes and attributes exists that
 can be filled out by subsequent calls to ``to_zarr``.
@@ -792,15 +848,15 @@ correct alignment of the new data with t
 explicit mapping from dimension names to Python ``slice`` objects indicating
 where the data should be written (in index space, not label space), e.g.,
 
-.. ipython:: python
+.. jupyter-execute::
 
     # For convenience, we'll slice a single dataset, but in the real use-case
     # we would create them separately possibly even from separate processes.
     ds = xr.Dataset({"foo": ("x", np.arange(30))}, coords={"x": np.arange(30)})
     # Any of the following region specifications are valid
-    ds.isel(x=slice(0, 10)).to_zarr(path, region="auto")
-    ds.isel(x=slice(10, 20)).to_zarr(path, region={"x": "auto"})
-    ds.isel(x=slice(20, 30)).to_zarr(path, region={"x": slice(20, 30)})
+    ds.isel(x=slice(0, 10)).to_zarr(path, region="auto", consolidated=False)
+    ds.isel(x=slice(10, 20)).to_zarr(path, region={"x": "auto"}, consolidated=False)
+    ds.isel(x=slice(20, 30)).to_zarr(path, region={"x": slice(20, 30)}, consolidated=False)
 
 Concurrent writes with ``region`` are safe as long as they modify distinct
 chunks in the underlying Zarr arrays (or use an appropriate ``lock``).
@@ -815,24 +871,23 @@ Zarr Compressors and Filters
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 There are many different `options for compression and filtering possible with
-zarr <https://zarr.readthedocs.io/en/stable/tutorial.html#compressors>`_.
+zarr <https://zarr.readthedocs.io/en/stable/user-guide/arrays.html#compressors>`_.
 
 These options can be passed to the ``to_zarr`` method as variable encoding.
 For example:
 
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
 
     ! rm -rf foo.zarr
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     import zarr
-    from numcodecs.blosc import Blosc
+    from zarr.codecs import BloscCodec
 
-    compressor = Blosc(cname="zstd", clevel=3, shuffle=2)
-    ds.to_zarr("foo.zarr", encoding={"foo": {"compressor": compressor}})
+    compressor = BloscCodec(cname="zstd", clevel=3, shuffle="shuffle")
+    ds.to_zarr("foo.zarr", consolidated=False, encoding={"foo": {"compressors": [compressor]}})
 
 .. note::
 
@@ -871,13 +926,12 @@ To resize and then append values along a
 ``append_dim``. This is a good option if data always arrives in a particular
 order, e.g., for time-stepping a simulation:
 
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
 
     ! rm -rf path/to/directory.zarr
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     ds1 = xr.Dataset(
         {"foo": (("x", "y", "t"), np.random.rand(4, 5, 2))},
@@ -887,7 +941,10 @@ order, e.g., for time-stepping a simulat
             "t": pd.date_range("2001-01-01", periods=2),
         },
     )
-    ds1.to_zarr("path/to/directory.zarr")
+    ds1.to_zarr("path/to/directory.zarr", consolidated=False)
+
+.. jupyter-execute::
+
     ds2 = xr.Dataset(
         {"foo": (("x", "y", "t"), np.random.rand(4, 5, 2))},
         coords={
@@ -896,7 +953,7 @@ order, e.g., for time-stepping a simulat
             "t": pd.date_range("2001-01-03", periods=2),
         },
     )
-    ds2.to_zarr("path/to/directory.zarr", append_dim="t")
+    ds2.to_zarr("path/to/directory.zarr", append_dim="t", consolidated=False)
 
 .. _io.zarr.writing_chunks:
 
@@ -932,7 +989,7 @@ For example, let's say we're working wit
 ``('time', 'x', 'y')``, a variable ``Tair`` which is chunked in ``x`` and ``y``,
 and two multi-dimensional coordinates ``xc`` and ``yc``:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds = xr.tutorial.open_dataset("rasm")
 
@@ -944,26 +1001,25 @@ These multi-dimensional coordinates are
 space on disk or in memory, yet when writing to disk the default zarr behavior is to
 split them into chunks:
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    ds.to_zarr("path/to/directory.zarr", mode="w")
-    ! ls -R path/to/directory.zarr
+    ds.to_zarr("path/to/directory.zarr", consolidated=False, mode="w")
+    !tree -I zarr.json path/to/directory.zarr
 
 
 This may cause unwanted overhead on some systems, such as when reading from a cloud
 storage provider. To disable this chunking, we can specify a chunk size equal to the
-length of each dimension by using the shorthand chunk size ``-1``:
+shape of each coordinate array in the ``encoding`` argument:
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     ds.to_zarr(
         "path/to/directory.zarr",
-        encoding={"xc": {"chunks": (-1, -1)}, "yc": {"chunks": (-1, -1)}},
+        encoding={"xc": {"chunks": ds.xc.shape}, "yc": {"chunks": ds.yc.shape}},
+        consolidated=False,
         mode="w",
     )
-    ! ls -R path/to/directory.zarr
+    !tree -I zarr.json path/to/directory.zarr
 
 
 The number of chunks on Tair matches our dask chunks, while there is now only a single
@@ -1002,7 +1058,7 @@ By default Xarray uses a feature called
 *consolidated metadata*, storing all metadata for the entire dataset with a
 single key (by default called ``.zmetadata``). This typically drastically speeds
 up opening the store. (For more information on this feature, consult the
-`zarr docs on consolidating metadata <https://zarr.readthedocs.io/en/latest/tutorial.html#consolidating-metadata>`_.)
+`zarr docs on consolidating metadata <https://zarr.readthedocs.io/en/latest/user-guide/consolidated_metadata.html>`_.)
 
 By default, xarray writes consolidated metadata and attempts to read stores
 with consolidated metadata, falling back to use non-consolidated metadata for
@@ -1042,7 +1098,7 @@ with ``_FillValue`` using the ``use_zarr
 Kerchunk
 --------
 
-`Kerchunk <https://fsspec.github.io/kerchunk/index.html>`_ is a Python library
+`Kerchunk <https://fsspec.github.io/kerchunk>`_ is a Python library
 that allows you to access chunked and compressed data formats (such as NetCDF3, NetCDF4, HDF5, GRIB2, TIFF & FITS),
 many of which are primary data formats for many data archives, by viewing the
 whole archive as an ephemeral `Zarr`_ dataset which allows for parallel, chunk-specific access.
@@ -1066,23 +1122,19 @@ with ``xarray``, especially when these a
 reference can refer to thousands of the original data files present in these archives.
 You can view the whole dataset from this combined reference using the above packages.
 
-The following example shows opening a combined references generated from a ``.hdf`` file stored locally.
-
-.. ipython:: python
-
-    storage_options = {
-        "target_protocol": "file",
-    }
+The following example shows opening a single ``json`` reference to the ``saved_on_disk.h5`` file created above.
+If the file were instead stored remotely (e.g. ``s3://saved_on_disk.h5``), you can pass ``storage_options``
+to `configure fsspec <https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.implementations.reference.ReferenceFileSystem.__init__>`_:
 
-    # add the `remote_protocol` key in `storage_options` if you're accessing a file remotely
+.. jupyter-execute::
 
-    ds1 = xr.open_dataset(
+    ds_kerchunked = xr.open_dataset(
         "./combined.json",
         engine="kerchunk",
-        storage_options=storage_options,
+        storage_options={},
     )
 
-    ds1
+    ds_kerchunked
 
 .. note::
 
@@ -1104,7 +1156,7 @@ DataArray ``to_iris`` and ``from_iris``
 If iris is installed, xarray can convert a ``DataArray`` into a ``Cube`` using
 :py:meth:`DataArray.to_iris`:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da = xr.DataArray(
         np.random.rand(4, 5),
@@ -1113,12 +1165,12 @@ If iris is installed, xarray can convert
     )
 
     cube = da.to_iris()
-    cube
+    print(cube)
 
 Conversely, we can create a new ``DataArray`` object from a ``Cube`` using
 :py:meth:`DataArray.from_iris`:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da_cube = xr.DataArray.from_iris(cube)
     da_cube
@@ -1130,27 +1182,34 @@ datasets.  It uses the file saving and l
 more "correct" translation between them, but still with very low overhead and not
 using actual disk files.
 
-For example:
+Here we load an xarray dataset and convert it to Iris cubes:
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
+    :stderr:
 
     ds = xr.tutorial.open_dataset("air_temperature_gradient")
     cubes = ncdata.iris_xarray.cubes_from_xarray(ds)
     print(cubes)
+
+.. jupyter-execute::
+
     print(cubes[1])
 
-.. ipython:: python
-    :okwarning:
+And we can convert the cubes back to an xarray dataset:
+
+.. jupyter-execute::
+
+    # ensure dataset-level and variable-level attributes are loaded correctly
+    iris.FUTURE.save_split_attrs = True
 
     ds = ncdata.iris_xarray.cubes_to_xarray(cubes)
-    print(ds)
+    ds
 
 Ncdata can also adjust file data within load and save operations, to fix data loading
 problems or provide exact save formatting without needing to modify files on disk.
 See for example: `ncdata usage examples`_
 
-.. _Iris: https://scitools.org.uk/iris
+.. _Iris: https://scitools-iris.readthedocs.io
 .. _Ncdata: https://ncdata.readthedocs.io/en/latest/index.html
 .. _ncdata usage examples: https://github.com/pp-mo/ncdata/tree/v0.1.2?tab=readme-ov-file#correct-a-miscoded-attribute-in-iris-input
 
@@ -1168,28 +1227,17 @@ For example, we can open a connection to
 __ https://www.prism.oregonstate.edu/
 __ https://iri.columbia.edu/
 
-.. ipython source code for this section
-   we don't use this to avoid hitting the DAP server on every doc build.
 
-   remote_data = xr.open_dataset(
-       'http://iridl.ldeo.columbia.edu/SOURCES/.OSU/.PRISM/.monthly/dods',
-       decode_times=False)
-   tmax = remote_data.tmax[:500, ::3, ::3]
-   tmax
-
-   @savefig opendap-prism-tmax.png
-   tmax[0].plot()
-
-.. ipython::
-    :verbatim:
-
-    In [3]: remote_data = xr.open_dataset(
-       ...:     "http://iridl.ldeo.columbia.edu/SOURCES/.OSU/.PRISM/.monthly/dods",
-       ...:     decode_times=False,
-       ...: )
+.. jupyter-input::
+
+    remote_data = xr.open_dataset(
+        "http://iridl.ldeo.columbia.edu/SOURCES/.OSU/.PRISM/.monthly/dods",
+        decode_times=False,
+        )
+    remote_data
+
+.. jupyter-output::
 
-    In [4]: remote_data
-    Out[4]:
     <xarray.Dataset>
     Dimensions:  (T: 1422, X: 1405, Y: 621)
     Coordinates:
@@ -1221,13 +1269,13 @@ __ https://iri.columbia.edu/
 We can select and slice this data any number of times, and nothing is loaded
 over the network until we look at particular values:
 
-.. ipython::
-    :verbatim:
+.. jupyter-input::
+
+    tmax = remote_data["tmax"][:500, ::3, ::3]
+    tmax
 
-    In [4]: tmax = remote_data["tmax"][:500, ::3, ::3]
+.. jupyter-output::
 
-    In [5]: tmax
-    Out[5]:
     <xarray.DataArray 'tmax' (T: 500, Y: 207, X: 469)>
     [48541500 values with dtype=float64]
     Coordinates:
@@ -1240,42 +1288,51 @@ over the network until we look at partic
         units: Celsius_scale
         expires: 1443657600
 
+.. jupyter-input::
+
     # the data is downloaded automatically when we make the plot
-    In [6]: tmax[0].plot()
+    tmax[0].plot()
 
 .. image:: ../_static/opendap-prism-tmax.png
 
-Some servers require authentication before we can access the data. For this
-purpose we can explicitly create a :py:class:`backends.PydapDataStore`
-and pass in a `Requests`__ session object. For example for
-HTTP Basic authentication::
+Some servers require authentication before we can access the data. Pydap uses
+a `Requests`__ session object (which the user can pre-define), and this
+session object can recover `authentication`__ credentials from a locally stored
+``.netrc`` file. For example, to connect to a server that requires NASA's
+URS authentication, with the username/password credentials stored on a locally
+accessible ``.netrc``, access to OPeNDAP data should be as simple as this::
 
     import xarray as xr
     import requests
 
-    session = requests.Session()
-    session.auth = ('username', 'password')
+    my_session = requests.Session()
+
+    ds_url = 'https://gpm1.gesdisc.eosdis.nasa.gov/opendap/hyrax/example.nc'
+
+    ds = xr.open_dataset(ds_url, session=my_session, engine="pydap")
 
-    store = xr.backends.PydapDataStore.open('http://example.com/data',
-                                            session=session)
-    ds = xr.open_dataset(store)
+Moreover, a bearer token header can be included in the ``requests`` session
+object, allowing for token-based authentication, which OPeNDAP servers can use
+to avoid some redirects.
 
-`Pydap's cas module`__ has functions that generate custom sessions for
-servers that use CAS single sign-on. For example, to connect to servers
-that require NASA's URS authentication::
 
-  import xarray as xr
-  from pydata.cas.urs import setup_session
+Lastly, OPeNDAP servers may provide endpoint URLs for different OPeNDAP protocols,
+DAP2 and DAP4. To specify which protocol between the two options to use, you can
+replace the scheme of the url with the name of the protocol. For example::
 
-  ds_url = 'https://gpm1.gesdisc.eosdis.nasa.gov/opendap/hyrax/example.nc'
+    # dap2 url
+    ds_url = 'dap2://gpm1.gesdisc.eosdis.nasa.gov/opendap/hyrax/example.nc'
 
-  session = setup_session('username', 'password', check_url=ds_url)
-  store = xr.backends.PydapDataStore.open(ds_url, session=session)
+    # dap4 url
+    ds_url = 'dap4://gpm1.gesdisc.eosdis.nasa.gov/opendap/hyrax/example.nc'
 
-  ds = xr.open_dataset(store)
+While most OPeNDAP servers implement DAP2, not all of them implement DAP4. It
+is recommended to check whether the URL you are using `supports DAP4`__ by
+opening it in a browser.
 
 __ https://docs.python-requests.org
-__ https://www.pydap.org/en/latest/client.html#authentication
+__ https://pydap.github.io/pydap/en/notebooks/Authentication.html
+__ https://pydap.github.io/pydap/en/faqs/dap2_or_dap4_url.html
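
 As a rough sketch of the token-based approach mentioned above (the token value
 and URL below are placeholders, and the server must accept bearer tokens)::

     import requests
     import xarray as xr

     my_session = requests.Session()
     # attach a (placeholder) bearer token to every request in this session
     my_session.headers.update({"Authorization": "Bearer <your-token>"})

     ds_url = "dap4://gpm1.gesdisc.eosdis.nasa.gov/opendap/hyrax/example.nc"
     ds = xr.open_dataset(ds_url, session=my_session, engine="pydap")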
 
 .. _io.pickle:
 
@@ -1285,7 +1342,7 @@ Pickle
 The simplest way to serialize an xarray object is to use Python's built-in pickle
 module:
 
-.. ipython:: python
+.. jupyter-execute::
 
     import pickle
 
@@ -1320,18 +1377,16 @@ Dictionary
 We can convert a ``Dataset`` (or a ``DataArray``) to a dict using
 :py:meth:`Dataset.to_dict`:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds = xr.Dataset({"foo": ("x", np.arange(30))})
-    ds
-
     d = ds.to_dict()
     d
 
 We can create a new xarray object from a dict using
 :py:meth:`Dataset.from_dict`:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds_dict = xr.Dataset.from_dict(d)
     ds_dict
@@ -1344,19 +1399,22 @@ be quite large.
 To export just the dataset schema without the data itself, use the
 ``data=False`` option:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.to_dict(data=False)
 
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
 
     # We're now done with the dataset named `ds`.  Although the `with` statement closed
     # the dataset, displaying the unpickled pickle of `ds` re-opened "saved_on_disk.nc".
     # However, `ds` (rather than the unpickled dataset) refers to the open file.  Delete
     # `ds` to close the file.
     del ds
-    os.remove("saved_on_disk.nc")
+
+    for f in ["saved_on_disk.nc", "saved_on_disk.h5"]:
+        if os.path.exists(f):
+            os.remove(f)
 
 This can be useful for generating indices of dataset contents to expose to
 search indices or other automated data discovery tools.
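
 Since the schema-only dict contains no array payloads, it is usually
 JSON-serializable as-is; a small sketch (``ds`` being the dataset from the
 example above, and assuming its attributes are plain Python types):

 .. code:: python

     import json

     # dump just the schema (dims, coords, attrs, dtypes) to JSON
     print(json.dumps(ds.to_dict(data=False), indent=2))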
@@ -1369,15 +1427,15 @@ Rasterio
 GDAL readable raster data using `rasterio`_  such as GeoTIFFs can be opened using the `rioxarray`_ extension.
 `rioxarray`_ can also handle geospatial related tasks such as re-projecting and clipping.
 
-.. ipython::
-    :verbatim:
+.. jupyter-input::
 
-    In [1]: import rioxarray
+    import rioxarray
 
-    In [2]: rds = rioxarray.open_rasterio("RGB.byte.tif")
+    rds = rioxarray.open_rasterio("RGB.byte.tif")
+    rds
+
+.. jupyter-output::
 
-    In [3]: rds
-    Out[3]:
     <xarray.DataArray (band: 3, y: 718, x: 791)>
     [1703814 values with dtype=uint8]
     Coordinates:
@@ -1396,15 +1454,17 @@ GDAL readable raster data using `rasteri
         add_offset:          0.0
         grid_mapping:        spatial_ref
 
-    In [4]: rds.rio.crs
-    Out[4]: CRS.from_epsg(32618)
+.. jupyter-input::
+
+    rds.rio.crs
+    # CRS.from_epsg(32618)
 
-    In [5]: rds4326 = rds.rio.reproject("epsg:4326")
+    rds4326 = rds.rio.reproject("epsg:4326")
 
-    In [6]: rds4326.rio.crs
-    Out[6]: CRS.from_epsg(4326)
+    rds4326.rio.crs
+    # CRS.from_epsg(4326)
 
-    In [7]: rds4326.rio.to_raster("RGB.byte.4326.tif")
+    rds4326.rio.to_raster("RGB.byte.4326.tif")
 
 
 .. _rasterio: https://rasterio.readthedocs.io/en/latest/
@@ -1414,8 +1474,8 @@ GDAL readable raster data using `rasteri
 
 .. _io.cfgrib:
 
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
 
     import shutil
 
@@ -1429,10 +1489,9 @@ Xarray supports reading GRIB files via E
 if it is installed. To open a GRIB file supply ``engine='cfgrib'``
 to :py:func:`open_dataset` after installing cfgrib_:
 
-.. ipython::
-    :verbatim:
+.. jupyter-input::
 
-    In [1]: ds_grib = xr.open_dataset("example.grib", engine="cfgrib")
+    ds_grib = xr.open_dataset("example.grib", engine="cfgrib")
 
 We recommend installing cfgrib via conda::
 
diff -pruN 2025.03.1-8/doc/user-guide/pandas.rst 2025.10.1-1/doc/user-guide/pandas.rst
--- 2025.03.1-8/doc/user-guide/pandas.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/user-guide/pandas.rst	2025-10-10 10:38:05.000000000 +0000
@@ -14,8 +14,8 @@ aware libraries such as `Seaborn`__.
 __ https://pandas.pydata.org/pandas-docs/stable/visualization.html
 __ https://seaborn.pydata.org/
 
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
 
     import numpy as np
     import pandas as pd
@@ -46,7 +46,7 @@ Dataset and DataFrame
 To convert any dataset to a ``DataFrame`` in tidy form, use the
 :py:meth:`Dataset.to_dataframe()` method:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds = xr.Dataset(
         {"foo": (("x", "y"), np.random.randn(2, 3))},
@@ -58,6 +58,9 @@ To convert any dataset to a ``DataFrame`
         },
     )
     ds
+
+.. jupyter-execute::
+
     df = ds.to_dataframe()
     df
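
 The frame is indexed by a :py:class:`pandas.MultiIndex` over the dataset
 dimensions; calling ``reset_index`` turns those levels into ordinary columns
 for a flat, tidy table:

 .. code:: python

     # turn the (x, y) MultiIndex into ordinary columns
     df.reset_index()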
 
@@ -74,7 +77,7 @@ To create a ``Dataset`` from a ``DataFra
 :py:meth:`Dataset.from_dataframe` class method or the equivalent
 :py:meth:`pandas.DataFrame.to_xarray` method:
 
-.. ipython:: python
+.. jupyter-execute::
 
     xr.Dataset.from_dataframe(df)
 
@@ -95,19 +98,25 @@ DataArray and Series
 of ``Series``. The methods are very similar to those for working with
 DataFrames:
 
-.. ipython:: python
+.. jupyter-execute::
 
     s = ds["foo"].to_series()
     s
+
+.. jupyter-execute::
+
     # or equivalently, with Series.to_xarray()
     xr.DataArray.from_series(s)
 
 Both the ``from_series`` and ``from_dataframe`` methods use reindexing, so they
 work even if the hierarchical index is not a full tensor product:
 
-.. ipython:: python
+.. jupyter-execute::
 
     s[::2]
+
+.. jupyter-execute::
+
     s[::2].to_xarray()
 
 Lossless and reversible conversion
@@ -141,7 +150,7 @@ DataArray directly into a pandas object
 available in pandas (i.e., a 1D array is converted to a
 :py:class:`~pandas.Series` and 2D to :py:class:`~pandas.DataFrame`):
 
-.. ipython:: python
+.. jupyter-execute::
 
     arr = xr.DataArray(
         np.random.randn(2, 3), coords=[("x", [10, 20]), ("y", ["a", "b", "c"])]
@@ -153,7 +162,7 @@ To perform the inverse operation of conv
 array with the same shape, simply use the :py:class:`DataArray`
 constructor:
 
-.. ipython:: python
+.. jupyter-execute::
 
     xr.DataArray(df)
 
@@ -161,7 +170,7 @@ Both the ``DataArray`` and ``Dataset`` c
 objects into xarray objects with the same shape. This means that they
 preserve all use of multi-indexes:
 
-.. ipython:: python
+.. jupyter-execute::
 
     index = pd.MultiIndex.from_arrays(
         [["a", "a", "b"], [0, 1, 2]], names=["one", "two"]
@@ -200,20 +209,21 @@ So you can represent a Panel, in two way
 
 Let's take a look:
 
-.. ipython:: python
+.. jupyter-execute::
 
-    data = np.random.default_rng(0).rand(2, 3, 4)
+    data = np.random.default_rng(0).random((2, 3, 4))
     items = list("ab")
     major_axis = list("mno")
     minor_axis = pd.date_range(start="2000", periods=4, name="date")
 
 With old versions of pandas (prior to 0.25), this could be stored in a ``Panel``:
 
-.. ipython::
-    :verbatim:
+.. jupyter-input::
+
+    pd.Panel(data, items, major_axis, minor_axis)
+
+.. jupyter-output::
 
-    In [1]: pd.Panel(data, items, major_axis, minor_axis)
-    Out[1]:
     <class 'pandas.core.panel.Panel'>
     Dimensions: 2 (items) x 3 (major_axis) x 4 (minor_axis)
     Items axis: a to b
@@ -222,7 +232,7 @@ With old versions of pandas (prior to 0.
 
 To put this data in a ``DataArray``, write:
 
-.. ipython:: python
+.. jupyter-execute::
 
     array = xr.DataArray(data, [items, major_axis, minor_axis])
     array
@@ -233,7 +243,7 @@ respectively, while the third retains it
 
 You can also easily convert this data into ``Dataset``:
 
-.. ipython:: python
+.. jupyter-execute::
 
     array.to_dataset(dim="dim_0")
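
 Each "item" can still be pulled out as a two-dimensional pandas object, much
 like a single slice of the old ``Panel``; a short sketch:

 .. code:: python

     # one "item" of the 3D array as a pandas DataFrame
     array.sel(dim_0="a").to_pandas()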
 
diff -pruN 2025.03.1-8/doc/user-guide/plotting.rst 2025.10.1-1/doc/user-guide/plotting.rst
--- 2025.03.1-8/doc/user-guide/plotting.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/user-guide/plotting.rst	2025-10-10 10:38:05.000000000 +0000
@@ -45,14 +45,14 @@ For more extensive plotting applications
   dynamic plots (backed by ``Holoviews`` or ``Geoviews``) by adding a ``hvplot``
   accessor to DataArrays.
 
-- `Cartopy <https://scitools.org.uk/cartopy/docs/latest/>`_: Provides cartographic
+- `Cartopy <https://cartopy.readthedocs.io/stable/>`_: Provides cartographic
   tools.
 
 Imports
 ~~~~~~~
 
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
 
     # Use defaults so we don't get gridlines in generated docs
     import matplotlib as mpl
@@ -61,20 +61,23 @@ Imports
 
 The following imports are necessary for all of the examples.
 
-.. ipython:: python
+.. jupyter-execute::
 
+    import cartopy.crs as ccrs
+    import matplotlib.pyplot as plt
     import numpy as np
     import pandas as pd
-    import matplotlib.pyplot as plt
     import xarray as xr
 
 For these examples we'll use the North American air temperature dataset.
 
-.. ipython:: python
+.. jupyter-execute::
 
     airtemps = xr.tutorial.open_dataset("air_temperature")
     airtemps
 
+.. jupyter-execute::
+
     # Convert to celsius
     air = airtemps.air - 273.15
 
@@ -98,13 +101,10 @@ One Dimension
 
 The simplest way to make a plot is to call the :py:func:`DataArray.plot()` method.
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     air1d = air.isel(lat=10, lon=10)
-
-    @savefig plotting_1d_simple.png width=4in
-    air1d.plot()
+    air1d.plot();
 
 Xarray uses the coordinate name along with metadata ``attrs.long_name``,
 ``attrs.standard_name``, ``DataArray.name`` and ``attrs.units`` (if available)
@@ -114,7 +114,7 @@ The names ``long_name``, ``standard_name
 When choosing names, the order of precedence is ``long_name``, ``standard_name`` and finally ``DataArray.name``.
 The y-axis label in the above plot was constructed from the ``long_name`` and ``units`` attributes of ``air1d``.
 
-.. ipython:: python
+.. jupyter-execute::
 
     air1d.attrs
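
 To see how these attributes drive the labels, here is a self-contained sketch
 with a synthetic array (the names and units are made up):

 .. code:: python

     temp = xr.DataArray(
         np.random.randn(30),
         dims="t",
         attrs={"long_name": "synthetic temperature", "units": "K"},
     )
     # the y-axis label is built as "synthetic temperature [K]"
     temp.plot();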
 
@@ -131,11 +131,9 @@ can be used:
 
 .. _matplotlib.pyplot.plot: https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.plot
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig plotting_1d_additional_args.png width=4in
-    air1d[:200].plot.line("b-^")
+    air1d[:200].plot.line("b-^");
 
 .. note::
     Not all xarray plotting methods support passing positional arguments
@@ -144,11 +142,9 @@ can be used:
 
 Keyword arguments work the same way, and are more explicit.
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig plotting_example_sin3.png width=4in
-    air1d[:200].plot.line(color="purple", marker="o")
+    air1d[:200].plot.line(color="purple", marker="o");
 
 =========================
  Adding to Existing Axis
@@ -159,20 +155,14 @@ To add the plot to an existing axis pass
 In this example ``axs`` is an array consisting of the left and right
 axes created by ``plt.subplots``.
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     fig, axs = plt.subplots(ncols=2)
 
-    axs
+    print(axs)
 
     air1d.plot(ax=axs[0])
-    air1d.plot.hist(ax=axs[1])
-
-    plt.tight_layout()
-
-    @savefig plotting_example_existing_axes.png width=6in
-    plt.draw()
+    air1d.plot.hist(ax=axs[1]);
 
 On the right is a histogram created by :py:func:`xarray.plot.hist`.
 
@@ -187,18 +177,9 @@ control the figure size. For convenience
 support the ``aspect`` and ``size`` arguments which control the size of the
 resulting image via the formula ``figsize = (aspect * size, size)``:
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    air1d.plot(aspect=2, size=3)
-    @savefig plotting_example_size_and_aspect.png
-    plt.tight_layout()
-
-.. ipython:: python
-    :suppress:
-
-    # create a dummy figure so sphinx plots everything below normally
-    plt.figure()
+    air1d.plot(aspect=2, size=3);
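
 The same figure size can be requested directly with matplotlib's ``figsize``;
 a one-line sketch:

 .. code:: python

     air1d.plot(figsize=(6, 3));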
 
 This feature also works with :ref:`plotting.faceting`. For facet plots,
 ``size`` and ``aspect`` refer to a single panel (so that ``aspect * size``
@@ -229,8 +210,7 @@ However, you can also use non-dimension
 without coordinates along the x-axis. To illustrate this, let's calculate a 'decimal day' (epoch)
 from the time and assign it as a non-dimension coordinate:
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     decimal_day = (air1d.time - air1d.time[0]) / pd.Timedelta("1d")
     air1d_multi = air1d.assign_coords(decimal_day=("time", decimal_day.data))
@@ -238,27 +218,24 @@ from the time and assign it as a non-dim
 
 To use ``'decimal_day'`` as x coordinate it must be explicitly specified:
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    air1d_multi.plot(x="decimal_day")
+    air1d_multi.plot(x="decimal_day");
 
 Creating a new MultiIndex named ``'date'`` from ``'time'`` and ``'decimal_day'``,
 it is also possible to use a MultiIndex level as x-axis:
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     air1d_multi = air1d_multi.set_index(date=("time", "decimal_day"))
-    air1d_multi.plot(x="decimal_day")
+    air1d_multi.plot(x="decimal_day");
 
 Finally, if a dataset does not have any coordinates it enumerates all data points:
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     air1d_multi = air1d_multi.drop_vars(["date", "time", "decimal_day"])
-    air1d_multi.plot()
+    air1d_multi.plot();
 
 The same applies to 2D plots below.
 
@@ -270,11 +247,9 @@ It is possible to make line plots of two
 with appropriate arguments. Consider the 3D variable ``air`` defined above. We can use line
 plots to check the variation of air temperature at three different latitudes along a longitude line:
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig plotting_example_multiple_lines_x_kwarg.png
-    air.isel(lon=10, lat=[19, 21, 22]).plot.line(x="time")
+    air.isel(lon=10, lat=[19, 21, 22]).plot.line(x="time");
 
 It is required to explicitly specify either
 
@@ -292,11 +267,9 @@ If required, the automatic legend can be
 
 It is also possible to make line plots such that the data are on the x-axis and a dimension is on the y-axis. This can be done by specifying the appropriate ``y`` keyword argument.
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig plotting_example_xy_kwarg.png
-    air.isel(time=10, lon=[10, 11]).plot(y="lat", hue="lon")
+    air.isel(time=10, lon=[10, 11]).plot(y="lat", hue="lon");
 
 ============
  Step plots
@@ -305,18 +278,15 @@ It is also possible to make line plots s
 As an alternative, a step plot similar to matplotlib's ``plt.step`` can also be
 made using 1D data.
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig plotting_example_step.png width=4in
-    air1d[:20].plot.step(where="mid")
+    air1d[:20].plot.step(where="mid");
 
 The argument ``where`` defines where the steps should be placed; the options are
 ``'pre'`` (default), ``'post'``, and ``'mid'``. This is particularly handy
 when plotting data grouped with :py:meth:`Dataset.groupby_bins`.
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     air_grp = air.mean(["time", "lon"]).groupby_bins("lat", [0, 23.5, 66.5, 90])
     air_mean = air_grp.mean()
@@ -325,8 +295,7 @@ when plotting data grouped with :py:meth
     (air_mean + air_std).plot.step(ls=":")
     (air_mean - air_std).plot.step(ls=":")
     plt.ylim(-20, 30)
-    @savefig plotting_example_step_groupby.png width=4in
-    plt.title("Zonal mean temperature")
+    plt.title("Zonal mean temperature");
 
 In this case, the actual boundaries of the bins are used and the ``where`` argument
 is ignored.
@@ -338,13 +307,11 @@ Other axes kwargs
 
 The keyword arguments ``xincrease`` and ``yincrease`` let you control the axes direction.
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig plotting_example_xincrease_yincrease_kwarg.png
     air.isel(time=10, lon=[10, 11]).plot.line(
         y="lat", hue="lon", xincrease=False, yincrease=False
-    )
+    );
 
 In addition, one can use ``xscale`` and ``yscale`` to set the axes scaling,
 ``xticks`` and ``yticks`` to set the axes ticks, and ``xlim`` and ``ylim`` to set the axes limits.
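+
+As an illustrative sketch (this example is not part of the upstream docs), these
+keyword arguments can be passed directly to the plot call:
+
+.. jupyter-execute::
+
+    # hedged example: explicit y-limits and ticks on a 1D line plot
+    air1d[:200].plot(ylim=(-20, 40), yticks=[-20, 0, 20, 40]);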
@@ -362,22 +329,17 @@ Two Dimensions
 The default method :py:meth:`DataArray.plot` calls :py:func:`xarray.plot.pcolormesh`
 by default when the data is two-dimensional.
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     air2d = air.isel(time=500)
-
-    @savefig 2d_simple.png width=4in
-    air2d.plot()
+    air2d.plot();
 
 All 2d plots in xarray allow the use of the keyword arguments ``yincrease``
 and ``xincrease``.
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig 2d_simple_yincrease.png width=4in
-    air2d.plot(yincrease=False)
+    air2d.plot(yincrease=False);
 
 .. note::
 
@@ -393,15 +355,11 @@ and ``xincrease``.
 
 Xarray plots data with :ref:`missing_values`.
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     bad_air2d = air2d.copy()
-
     bad_air2d[dict(lat=slice(0, 10), lon=slice(0, 25))] = np.nan
-
-    @savefig plotting_missing_values.png width=4in
-    bad_air2d.plot()
+    bad_air2d.plot();
 
 ========================
  Nonuniform Coordinates
@@ -411,15 +369,13 @@ It's not necessary for the coordinates t
 :py:func:`xarray.plot.pcolormesh` (default) and :py:func:`xarray.plot.contourf` can
 produce plots with nonuniform coordinates.
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     b = air2d.copy()
     # Apply a nonlinear transformation to one of the coords
     b.coords["lat"] = np.log(b.coords["lat"])
 
-    @savefig plotting_nonuniform_coords.png width=4in
-    b.plot()
+    b.plot();
 
 ====================
  Other types of plot
@@ -429,28 +385,22 @@ There are several other options for plot
 
 Contour plot using :py:meth:`DataArray.plot.contour()`
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig plotting_contour.png width=4in
-    air2d.plot.contour()
+    air2d.plot.contour();
 
 Filled contour plot using :py:meth:`DataArray.plot.contourf()`
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig plotting_contourf.png width=4in
-    air2d.plot.contourf()
+    air2d.plot.contourf();
 
 Surface plot using :py:meth:`DataArray.plot.surface()`
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig plotting_surface.png width=4in
     # transpose just to make the example look a bit nicer
-    air2d.T.plot.surface()
+    air2d.T.plot.surface();
 
 ====================
  Calling Matplotlib
@@ -459,17 +409,12 @@ Surface plot using :py:meth:`DataArray.p
 Since this is a thin wrapper around matplotlib, all the functionality of
 matplotlib is available.
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     air2d.plot(cmap=plt.cm.Blues)
     plt.title("These colors prove North America\nhas fallen in the ocean")
     plt.ylabel("latitude")
-    plt.xlabel("longitude")
-    plt.tight_layout()
-
-    @savefig plotting_2d_call_matplotlib.png width=4in
-    plt.draw()
+    plt.xlabel("longitude");
 
 .. note::
 
@@ -479,14 +424,10 @@ matplotlib is available.
     In the example below, ``plt.xlabel`` effectively does nothing, since
     ``air2d.plot()`` updates the xlabel.
 
-    .. ipython:: python
-        :okwarning:
+    .. jupyter-execute::
 
         plt.xlabel("Never gonna see this.")
-        air2d.plot()
-
-        @savefig plotting_2d_call_matplotlib2.png width=4in
-        plt.draw()
+        air2d.plot();
 
 ===========
  Colormaps
@@ -495,11 +436,9 @@ matplotlib is available.
 Xarray borrows logic from Seaborn to infer what kind of color map to use. For
 example, consider the original data in Kelvins rather than Celsius:
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig plotting_kelvin.png width=4in
-    airtemps.air.isel(time=0).plot()
+    airtemps.air.isel(time=0).plot();
 
 The Celsius data contain 0, so a diverging color map was used. The
 Kelvin data do not contain 0, so the default color map was used.
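+
+To override this inference, one option (a hedged sketch, not part of the upstream
+docs) is to pass ``center`` explicitly, which forces a diverging colormap centered
+on the given value:
+
+.. jupyter-execute::
+
+    # hedged example: force a diverging colormap by centering on 273.15 K
+    airtemps.air.isel(time=0).plot(center=273.15);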
@@ -514,15 +453,13 @@ Outliers often have an extreme effect on
 Here we add two bad data points. This affects the color scale,
 washing out the plot.
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     air_outliers = airtemps.air.isel(time=0).copy()
     air_outliers[0, 0] = 100
     air_outliers[-1, -1] = 400
 
-    @savefig plotting_robust1.png width=4in
-    air_outliers.plot()
+    air_outliers.plot();
 
 This plot shows that we have outliers. The easy way to visualize
 the data without the outliers is to pass the parameter
@@ -530,11 +467,9 @@ the data without the outliers is to pass
 This will use the 2nd and 98th
 percentiles of the data to compute the color limits.
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig plotting_robust2.png width=4in
-    air_outliers.plot(robust=True)
+    air_outliers.plot(robust=True);
 
 Observe that the ranges of the color bar have changed. The arrows on the
 color bar indicate
@@ -549,29 +484,23 @@ rather than the default continuous color
 ``levels`` keyword argument can be used to generate plots with discrete
 colormaps. For example, to make a plot with 8 discrete color intervals:
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig plotting_discrete_levels.png width=4in
-    air2d.plot(levels=8)
+    air2d.plot(levels=8);
 
 It is also possible to use a list of levels to specify the boundaries of the
 discrete colormap:
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig plotting_listed_levels.png width=4in
-    air2d.plot(levels=[0, 12, 18, 30])
+    air2d.plot(levels=[0, 12, 18, 30]);
 
 You can also specify a list of discrete colors through the ``colors`` argument:
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     flatui = ["#9b59b6", "#3498db", "#95a5a6", "#e74c3c", "#34495e", "#2ecc71"]
-    @savefig plotting_custom_colors_levels.png width=4in
-    air2d.plot(levels=[0, 12, 18, 30], colors=flatui)
+    air2d.plot(levels=[0, 12, 18, 30], colors=flatui);
 
 Finally, if you have `Seaborn <https://seaborn.pydata.org/>`_
 installed, you can also specify a seaborn color palette to the ``cmap``
@@ -579,12 +508,9 @@ argument. Note that ``levels`` *must* be
 if using ``imshow`` or ``pcolormesh`` (but not with ``contour`` or ``contourf``,
 since levels are chosen automatically).
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig plotting_seaborn_palette.png width=4in
-    air2d.plot(levels=10, cmap="husl")
-    plt.draw()
+    air2d.plot(levels=10, cmap="husl");
 
 .. _plotting.faceting:
 
@@ -614,7 +540,7 @@ size of this dimension from 2920 -> 12.
 to just take a slice on that dimension.
 So let's use a slice to pick 6 times throughout the first year.
 
-.. ipython:: python
+.. jupyter-execute::
 
     t = air.isel(time=slice(0, 365 * 4, 250))
     t.coords
@@ -627,21 +553,17 @@ The easiest way to create faceted plots
 arguments to the xarray plotting methods/functions. This returns a
 :py:class:`xarray.plot.FacetGrid` object.
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig plot_facet_dataarray.png
-    g_simple = t.plot(x="lon", y="lat", col="time", col_wrap=3)
+    g_simple = t.plot(x="lon", y="lat", col="time", col_wrap=3);
 
 Faceting also works for line plots.
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig plot_facet_dataarray_line.png
     g_simple_line = t.isel(lat=slice(0, None, 4)).plot(
         x="lon", hue="lat", col="time", col_wrap=3
-    )
+    );
 
 ===============
  4 dimensional
@@ -652,16 +574,14 @@ Here we create a 4 dimensional array by
 a fixed amount. Now we can see how the temperature maps would compare if
 one were much hotter.
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     t2 = t.isel(time=slice(0, 2))
     t4d = xr.concat([t2, t2 + 40], pd.Index(["normal", "hot"], name="fourth_dim"))
     # This is a 4d array
     t4d.coords
 
-    @savefig plot_facet_4d.png
-    t4d.plot(x="lon", y="lat", col="time", row="fourth_dim")
+    t4d.plot(x="lon", y="lat", col="time", row="fourth_dim");
 
 ================
  Other features
@@ -669,19 +589,12 @@ one were much hotter.
 
 Faceted plotting supports other arguments common to xarray 2d plots.
 
-.. ipython:: python
-    :suppress:
-
-    plt.close("all")
-
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     hasoutliers = t.isel(time=slice(0, 5)).copy()
     hasoutliers[0, 0, 0] = -100
     hasoutliers[-1, -1, -1] = 400
 
-    @savefig plot_facet_robust.png
     g = hasoutliers.plot.pcolormesh(
         x="lon",
         y="lat",
@@ -704,25 +617,27 @@ It borrows an API and code from `Seaborn
 The structure is contained within the ``axs`` and ``name_dicts``
 attributes, both 2d NumPy object arrays.
 
-.. ipython:: python
+.. jupyter-execute::
 
     g.axs
 
+.. jupyter-execute::
+
     g.name_dicts
 
 It's possible to select the :py:class:`xarray.DataArray` or
 :py:class:`xarray.Dataset` corresponding to the FacetGrid through the
 ``name_dicts``.
 
-.. ipython:: python
+.. jupyter-execute::
 
     g.data.loc[g.name_dicts[0, 0]]
 
 Here is an example of using the lower level API and then modifying the axes after
 they have been plotted.
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     g = t.plot.imshow(x="lon", y="lat", col="time", col_wrap=3, robust=True)
 
@@ -730,10 +645,7 @@ they have been plotted.
         ax.set_title("Air Temperature %d" % i)
 
     bottomright = g.axs[-1, -1]
-    bottomright.annotate("bottom right", (240, 40))
-
-    @savefig plot_facet_iterator.png
-    plt.draw()
+    bottomright.annotate("bottom right", (240, 40));
 
 
 :py:class:`~xarray.plot.FacetGrid` objects have methods that let you customize the automatically generated
@@ -754,7 +666,7 @@ Datasets
 Xarray has limited support for plotting Dataset variables against each other.
 Consider this dataset
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds = xr.tutorial.scatter_example_dataset(seed=42)
     ds
@@ -765,84 +677,67 @@ Scatter
 
 Let's plot the ``A`` DataArray as a function of the ``y`` coord
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    ds.A
+    with xr.set_options(display_expand_data=False):
+        display(ds.A)
 
-    @savefig da_A_y.png
-    ds.A.plot.scatter(x="y")
+.. jupyter-execute::
+
+    ds.A.plot.scatter(x="y");
 
 The same plot can be displayed using the dataset:
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig ds_A_y.png
-    ds.plot.scatter(x="y", y="A")
+    ds.plot.scatter(x="y", y="A");
 
 Now suppose we want to scatter the ``A`` DataArray against the ``B`` DataArray
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig ds_simple_scatter.png
-    ds.plot.scatter(x="A", y="B")
+    ds.plot.scatter(x="A", y="B");
 
 The ``hue`` kwarg lets you vary the color by variable value
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig ds_hue_scatter.png
-    ds.plot.scatter(x="A", y="B", hue="w")
+    ds.plot.scatter(x="A", y="B", hue="w");
 
 You can force a legend instead of a colorbar by setting ``add_legend=True, add_colorbar=False``.
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig ds_discrete_legend_hue_scatter.png
-    ds.plot.scatter(x="A", y="B", hue="w", add_legend=True, add_colorbar=False)
+    ds.plot.scatter(x="A", y="B", hue="w", add_legend=True, add_colorbar=False);
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig ds_discrete_colorbar_hue_scatter.png
-    ds.plot.scatter(x="A", y="B", hue="w", add_legend=False, add_colorbar=True)
+    ds.plot.scatter(x="A", y="B", hue="w", add_legend=False, add_colorbar=True);
 
 The ``markersize`` kwarg lets you vary the point's size by variable value.
 You can additionally pass ``size_norm`` to control how the variable's values are mapped to point sizes.
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig ds_hue_size_scatter.png
-    ds.plot.scatter(x="A", y="B", hue="y", markersize="z")
+    ds.plot.scatter(x="A", y="B", hue="y", markersize="z");
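+
+A hedged sketch (not part of the upstream docs) of passing ``size_norm``, here as a
+matplotlib ``Normalize`` instance so that the mapping from values to point sizes
+uses a fixed range:
+
+.. jupyter-execute::
+
+    # hedged example: fix the value range used to map "z" onto marker sizes
+    ds.plot.scatter(
+        x="A", y="B", hue="y", markersize="z", size_norm=plt.Normalize(0, 1)
+    );
+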
 
 The ``z`` kwarg lets you plot the data along the z-axis as well.
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig ds_hue_size_scatter_z.png
-    ds.plot.scatter(x="A", y="B", z="z", hue="y", markersize="x")
+    ds.plot.scatter(x="A", y="B", z="z", hue="y", markersize="x");
 
 Faceting is also possible
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig ds_facet_scatter.png
-    ds.plot.scatter(x="A", y="B", hue="y", markersize="x", row="x", col="w")
+    ds.plot.scatter(x="A", y="B", hue="y", markersize="x", row="x", col="w");
 
 And adding the z-axis
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig ds_facet_scatter_z.png
-    ds.plot.scatter(x="A", y="B", z="z", hue="y", markersize="x", row="x", col="w")
+    ds.plot.scatter(x="A", y="B", z="z", hue="y", markersize="x", row="x", col="w");
 
 For more advanced scatter plots, we recommend converting the relevant data variables
 to a pandas DataFrame and using the extensive plotting capabilities of ``seaborn``.
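+
+For instance, a hedged sketch (not part of the upstream docs, and assuming
+``seaborn`` is installed):
+
+.. jupyter-execute::
+
+    import seaborn as sns
+
+    # flatten the Dataset into a tidy DataFrame and let seaborn handle the plot
+    df = ds[["A", "B"]].to_dataframe().reset_index()
+    sns.scatterplot(data=df, x="A", y="B", hue="w");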
@@ -852,20 +747,16 @@ Quiver
 
 Visualizing vector fields is supported with quiver plots:
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig ds_simple_quiver.png
-    ds.isel(w=1, z=1).plot.quiver(x="x", y="y", u="A", v="B")
+    ds.isel(w=1, z=1).plot.quiver(x="x", y="y", u="A", v="B");
 
 
 where ``u`` and ``v`` denote the x and y direction components of the arrow vectors. Again, faceting is also possible:
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig ds_facet_quiver.png
-    ds.plot.quiver(x="x", y="y", u="A", v="B", col="w", row="z", scale=4)
+    ds.plot.quiver(x="x", y="y", u="A", v="B", col="w", row="z", scale=4);
 
 ``scale`` is required for faceted quiver plots.
 The scale determines the number of data units per arrow length unit, i.e., a smaller ``scale`` value makes the arrows longer.
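+
+As an illustrative sketch (not part of the upstream docs), doubling ``scale``
+halves the arrow length:
+
+.. jupyter-execute::
+
+    ds.plot.quiver(x="x", y="y", u="A", v="B", col="w", row="z", scale=8);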
@@ -875,21 +766,17 @@ Streamplot
 
 Visualizing vector fields is also supported with streamline plots:
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig ds_simple_streamplot.png
-    ds.isel(w=1, z=1).plot.streamplot(x="x", y="y", u="A", v="B")
+    ds.isel(w=1, z=1).plot.streamplot(x="x", y="y", u="A", v="B");
 
 
 where ``u`` and ``v`` denote the x and y direction components of the vectors tangent to the streamlines.
 Again, faceting is also possible:
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig ds_facet_streamplot.png
-    ds.plot.streamplot(x="x", y="y", u="A", v="B", col="w", row="z")
+    ds.plot.streamplot(x="x", y="y", u="A", v="B", col="w", row="z");
 
 .. _plot-maps:
 
@@ -900,10 +787,8 @@ To follow this section you'll need to ha
 
 This script will plot the air temperature on a map.
 
-.. ipython:: python
-    :okwarning:
-
-    import cartopy.crs as ccrs
+.. jupyter-execute::
+    :stderr:
 
     air = xr.tutorial.open_dataset("air_temperature").air
 
@@ -913,15 +798,13 @@ This script will plot the air temperatur
     )
     p.axes.set_global()
 
-    @savefig plotting_maps_cartopy.png width=100%
-    p.axes.coastlines()
+    p.axes.coastlines();
 
 When faceting on maps, the projection can be transferred to the ``plot``
 function using the ``subplot_kws`` keyword. The axes for the subplots created
 by faceting are accessible in the object returned by ``plot``:
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     p = air.isel(time=[0, 4]).plot(
         transform=ccrs.PlateCarree(),
@@ -931,8 +814,6 @@ by faceting are accessible in the object
     for ax in p.axs.flat:
         ax.coastlines()
         ax.gridlines()
-    @savefig plotting_maps_cartopy_facetting.png width=100%
-    plt.draw()
 
 
 Details
@@ -952,20 +833,14 @@ There are three ways to use the xarray p
 
 These are provided for user convenience; they all call the same code.
 
-.. ipython:: python
-    :okwarning:
-
-    import xarray.plot as xplt
+.. jupyter-execute::
 
     da = xr.DataArray(range(5))
     fig, axs = plt.subplots(ncols=2, nrows=2)
     da.plot(ax=axs[0, 0])
     da.plot.line(ax=axs[0, 1])
-    xplt.plot(da, ax=axs[1, 0])
-    xplt.line(da, ax=axs[1, 1])
-    plt.tight_layout()
-    @savefig plotting_ways_to_use.png width=6in
-    plt.draw()
+    xr.plot.plot(da, ax=axs[1, 0])
+    xr.plot.line(da, ax=axs[1, 1]);
 
 Here the output is the same. Since the data is 1-dimensional, the line plot
 was used.
@@ -989,7 +864,7 @@ Coordinates
 If you'd like to find out what's really going on in the coordinate system,
 read on.
 
-.. ipython:: python
+.. jupyter-execute::
 
     a0 = xr.DataArray(np.zeros((4, 3, 2)), dims=("y", "x", "z"), name="temperature")
     a0[0, 0, 0] = 1
@@ -1002,11 +877,9 @@ Before reading on, you may want to look
 think carefully about what the limits, labels, and orientation for
 each of the axes should be.
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
-    @savefig plotting_example_2d_simple.png width=4in
-    a.plot()
+    a.plot();
 
 It may seem strange that
 the values on the y axis are decreasing with -0.5 on the top. This is because
@@ -1023,8 +896,7 @@ You can plot irregular grids defined by
 xarray, but you'll have to tell the plot function to use these coordinates
 instead of the default ones:
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     lon, lat = np.meshgrid(np.linspace(-20, 20, 5), np.linspace(0, 30, 4))
     lon += lat / 10
@@ -1035,38 +907,32 @@ instead of the default ones:
         coords={"lat": (("y", "x"), lat), "lon": (("y", "x"), lon)},
     )
 
-    @savefig plotting_example_2d_irreg.png width=4in
-    da.plot.pcolormesh(x="lon", y="lat")
+    da.plot.pcolormesh(x="lon", y="lat");
 
 Note that in this case, xarray still follows the pixel-centered convention.
 This might be undesirable in some cases, for example when your data is defined
 on a polar projection (:issue:`781`). This is why the default is not to follow
 this convention when plotting on a map:
 
-.. ipython:: python
-    :okwarning:
-
-    import cartopy.crs as ccrs
+.. jupyter-execute::
+    :stderr:
 
     ax = plt.subplot(projection=ccrs.PlateCarree())
     da.plot.pcolormesh(x="lon", y="lat", ax=ax)
     ax.scatter(lon, lat, transform=ccrs.PlateCarree())
     ax.coastlines()
-    @savefig plotting_example_2d_irreg_map.png width=4in
-    ax.gridlines(draw_labels=True)
+    ax.gridlines(draw_labels=True);
 
 You can however decide to infer the cell boundaries and use the
 ``infer_intervals`` keyword:
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     ax = plt.subplot(projection=ccrs.PlateCarree())
     da.plot.pcolormesh(x="lon", y="lat", ax=ax, infer_intervals=True)
     ax.scatter(lon, lat, transform=ccrs.PlateCarree())
     ax.coastlines()
-    @savefig plotting_example_2d_irreg_map_infer.png width=4in
-    ax.gridlines(draw_labels=True)
+    ax.gridlines(draw_labels=True);
 
 .. note::
     The data model of xarray does not support datasets with `cell boundaries`_
@@ -1077,10 +943,8 @@ You can however decide to infer the cell
 
 One can also make line plots with multidimensional coordinates. In this case, ``hue`` must be a dimension name, not a coordinate name.
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     f, ax = plt.subplots(2, 1)
     da.plot.line(x="lon", hue="y", ax=ax[0])
-    @savefig plotting_example_2d_hue_xy.png
-    da.plot.line(x="lon", hue="x", ax=ax[1])
+    da.plot.line(x="lon", hue="x", ax=ax[1]);
diff -pruN 2025.03.1-8/doc/user-guide/reshaping.rst 2025.10.1-1/doc/user-guide/reshaping.rst
--- 2025.03.1-8/doc/user-guide/reshaping.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/user-guide/reshaping.rst	2025-10-10 10:38:05.000000000 +0000
@@ -11,8 +11,8 @@ These methods are particularly useful fo
 Importing the library
 ---------------------
 
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
 
     import numpy as np
     import pandas as pd
@@ -20,6 +20,11 @@ Importing the library
 
     np.random.seed(123456)
 
+    # Use defaults so we don't get gridlines in generated docs
+    import matplotlib as mpl
+
+    mpl.rcdefaults()
+
 Reordering dimensions
 ---------------------
 
@@ -27,11 +32,13 @@ To reorder dimensions on a :py:class:`~x
 on a :py:class:`~xarray.Dataset`, use :py:meth:`~xarray.DataArray.transpose`. An
 ellipsis (`...`) can be used to represent all other dimensions:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds = xr.Dataset({"foo": (("x", "y", "z"), [[[42]]]), "bar": (("y", "z"), [[24]])})
-    ds.transpose("y", "z", "x")
-    ds.transpose(..., "x")  # equivalent
+    ds.transpose("y", "z", "x")  # equivalent to ds.transpose(..., "x")
+
+.. jupyter-execute::
+
     ds.transpose()  # reverses all dimensions
 
 Expand and squeeze dimensions
@@ -41,7 +48,7 @@ To expand a :py:class:`~xarray.DataArray
 variables on a :py:class:`~xarray.Dataset` along a new dimension,
 use :py:meth:`~xarray.DataArray.expand_dims`
 
-.. ipython:: python
+.. jupyter-execute::
 
     expanded = ds.expand_dims("w")
     expanded
@@ -52,7 +59,7 @@ To remove such a size-1 dimension from t
 or :py:class:`~xarray.Dataset`,
 use :py:meth:`~xarray.DataArray.squeeze`
 
-.. ipython:: python
+.. jupyter-execute::
 
     expanded.squeeze("w")
 
@@ -61,7 +68,7 @@ Converting between datasets and arrays
 
 To convert from a Dataset to a DataArray, use :py:meth:`~xarray.Dataset.to_dataarray`:
 
-.. ipython:: python
+.. jupyter-execute::
 
     arr = ds.to_dataarray()
     arr
@@ -73,20 +80,22 @@ coordinates.
 To convert back from a DataArray to a Dataset, use
 :py:meth:`~xarray.DataArray.to_dataset`:
 
-.. ipython:: python
+.. jupyter-execute::
 
     arr.to_dataset(dim="variable")
 
 The broadcasting behavior of ``to_dataarray`` means that the resulting array
 includes the union of data variable dimensions:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds2 = xr.Dataset({"a": 0, "b": ("x", [3, 4, 5])})
 
     # the input dataset has 4 elements
     ds2
 
+.. jupyter-execute::
+
     # the resulting array has 6 elements
     ds2.to_dataarray()
 
@@ -94,7 +103,7 @@ Otherwise, the result could not be repre
 
 If you use ``to_dataset`` without supplying the ``dim`` argument, the DataArray will be converted into a Dataset of one variable:
 
-.. ipython:: python
+.. jupyter-execute::
 
     arr.to_dataset(name="combined")
 
@@ -107,18 +116,21 @@ As part of xarray's nascent support for
 implemented :py:meth:`~xarray.DataArray.stack` and
 :py:meth:`~xarray.DataArray.unstack` methods, for combining or splitting dimensions:
 
-.. ipython:: python
+.. jupyter-execute::
 
     array = xr.DataArray(
         np.random.randn(2, 3), coords=[("x", ["a", "b"]), ("y", [0, 1, 2])]
     )
     stacked = array.stack(z=("x", "y"))
     stacked
+
+.. jupyter-execute::
+
     stacked.unstack("z")
 
 As elsewhere in xarray, an ellipsis (`...`) can be used to represent all unlisted dimensions:
 
-.. ipython:: python
+.. jupyter-execute::
 
     stacked = array.stack(z=[..., "x"])
     stacked
@@ -131,19 +143,25 @@ Like :py:meth:`DataFrame.unstack<pandas.
 always succeeds, even if the multi-index being unstacked does not contain all
 possible levels. Missing levels are filled in with ``NaN`` in the resulting object:
 
-.. ipython:: python
+.. jupyter-execute::
 
     stacked2 = stacked[::2]
     stacked2
+
+.. jupyter-execute::
+
     stacked2.unstack("z")
 
 However, xarray's ``stack`` has an important difference from pandas: unlike
 pandas, it does not automatically drop missing values. Compare:
 
-.. ipython:: python
+.. jupyter-execute::
 
     array = xr.DataArray([[np.nan, 1], [2, 3]], dims=["x", "y"])
     array.stack(z=("x", "y"))
+
+.. jupyter-execute::
+
     array.to_pandas().stack()
 
 We departed from pandas's behavior here because predictable shapes for new
@@ -171,15 +189,21 @@ Just as with :py:meth:`xarray.Dataset.st
 represented by a :py:class:`pandas.MultiIndex` object. These methods are used
 like this:
 
-.. ipython:: python
+.. jupyter-execute::
 
     data = xr.Dataset(
         data_vars={"a": (("x", "y"), [[0, 1, 2], [3, 4, 5]]), "b": ("x", [6, 7])},
         coords={"y": ["u", "v", "w"]},
     )
     data
+
+.. jupyter-execute::
+
     stacked = data.to_stacked_array("z", sample_dims=["x"])
     stacked
+
+.. jupyter-execute::
+
     unstacked = stacked.to_unstacked_dataset("z")
     unstacked
 
@@ -206,7 +230,7 @@ multi-indexes without modifying the data
 You can create a multi-index from several 1-dimensional variables and/or
 coordinates using :py:meth:`~xarray.DataArray.set_index`:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da = xr.DataArray(
         np.random.rand(4),
@@ -217,12 +241,15 @@ coordinates using :py:meth:`~xarray.Data
         dims="x",
     )
     da
+
+.. jupyter-execute::
+
     mda = da.set_index(x=["band", "wavenumber"])
     mda
 
 These coordinates can now be used for indexing, e.g.,
 
-.. ipython:: python
+.. jupyter-execute::
 
     mda.sel(band="a")
 
@@ -230,14 +257,14 @@ Conversely, you can use :py:meth:`~xarra
 to extract multi-index levels as coordinates (this is mainly useful
 for serialization):
 
-.. ipython:: python
+.. jupyter-execute::
 
     mda.reset_index("x")
 
 :py:meth:`~xarray.DataArray.reorder_levels` allows changing the order
 of multi-index levels:
 
-.. ipython:: python
+.. jupyter-execute::
 
     mda.reorder_levels(x=["wavenumber", "band"])
 
@@ -245,12 +272,18 @@ As of xarray v0.9 coordinate labels for
 You can also use ``.set_index`` / ``.reset_index`` to add / remove
 labels for one or several dimensions:
 
-.. ipython:: python
+.. jupyter-execute::
 
     array = xr.DataArray([1, 2, 3], dims="x")
     array
+
+.. jupyter-execute::
+
     array["c"] = ("x", ["a", "b", "c"])
     array.set_index(x="c")
+
+.. jupyter-execute::
+
     array = array.set_index(x="c")
     array = array.reset_index("x", drop=True)
 
@@ -262,10 +295,13 @@ Shift and roll
 To adjust coordinate labels, you can use the :py:meth:`~xarray.Dataset.shift` and
 :py:meth:`~xarray.Dataset.roll` methods:
 
-.. ipython:: python
+.. jupyter-execute::
 
     array = xr.DataArray([1, 2, 3, 4], dims="x")
     array.shift(x=2)
+
+.. jupyter-execute::
+
     array.roll(x=2, roll_coords=True)
 
 .. _reshape.sort:
@@ -277,7 +313,7 @@ One may sort a DataArray/Dataset via :py
 :py:meth:`~xarray.Dataset.sortby`. The input can be an individual or list of
 1D ``DataArray`` objects:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds = xr.Dataset(
         {
@@ -292,10 +328,16 @@ One may sort a DataArray/Dataset via :py
 
 As a shortcut, you can refer to existing coordinates by name:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.sortby("x")
+
+.. jupyter-execute::
+
     ds.sortby(["y", "x"])
+
+.. jupyter-execute::
+
     ds.sortby(["y", "x"], ascending=False)
 
 .. _reshape.coarsen:
@@ -309,41 +351,32 @@ it can also be used to reorganise your d
 
 Taking our example tutorial air temperature dataset over the Northern US
 
-.. ipython:: python
-    :suppress:
-
-    # Use defaults so we don't get gridlines in generated docs
-    import matplotlib as mpl
-
-    mpl.rcdefaults()
-
-.. ipython:: python
+.. jupyter-execute::
 
     air = xr.tutorial.open_dataset("air_temperature")["air"]
 
-    @savefig pre_coarsening.png
-    air.isel(time=0).plot(x="lon", y="lat")
+    air.isel(time=0).plot(x="lon", y="lat");
 
 we can split this up into sub-regions of size ``(9, 18)`` points using :py:meth:`~xarray.computation.rolling.DataArrayCoarsen.construct`:
 
-.. ipython:: python
+.. jupyter-execute::
 
     regions = air.coarsen(lat=9, lon=18, boundary="pad").construct(
         lon=("x_coarse", "x_fine"), lat=("y_coarse", "y_fine")
     )
-    regions
+    with xr.set_options(display_expand_data=False):
+        display(regions)
 
 9 new regions have been created, each of size 9 by 18 points.
 The ``boundary="pad"`` kwarg ensured that all regions are the same size even though the data does not evenly divide into these sizes.
 
 By plotting these 9 regions together via :ref:`faceting<plotting.faceting>` we can see how they relate to the original data.
 
-.. ipython:: python
+.. jupyter-execute::
 
-    @savefig post_coarsening.png
     regions.isel(time=0).plot(
         x="x_fine", y="y_fine", col="x_coarse", row="y_coarse", yincrease=False
-    )
+    );
 
 We are now free to easily apply any custom computation to each coarsened region of our new dataarray.
 This would involve specifying that applied functions should act over the ``"x_fine"`` and ``"y_fine"`` dimensions,
diff -pruN 2025.03.1-8/doc/user-guide/terminology.rst 2025.10.1-1/doc/user-guide/terminology.rst
--- 2025.03.1-8/doc/user-guide/terminology.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/user-guide/terminology.rst	2025-10-10 10:38:05.000000000 +0000
@@ -9,6 +9,12 @@ pandas; so we've put together a glossary
 *refers to an xarray* :py:class:`DataArray` *in the examples. For more
 complete examples, please consult the relevant documentation.*
 
+.. jupyter-execute::
+    :hide-code:
+
+    import numpy as np
+    import xarray as xr
+
 .. glossary::
 
     DataArray
@@ -131,17 +137,11 @@ complete examples, please consult the re
 
         __ https://numpy.org/neps/nep-0022-ndarray-duck-typing-overview.html
 
-        .. ipython:: python
-            :suppress:
-
-            import numpy as np
-            import xarray as xr
-
     Aligning
         Aligning refers to the process of ensuring that two or more DataArrays or Datasets
         have the same dimensions and coordinates, so that they can be combined or compared properly.
 
-        .. ipython:: python
+        .. jupyter-execute::
 
             x = xr.DataArray(
                 [[25, 35], [10, 24]],
@@ -153,15 +153,18 @@ complete examples, please consult the re
                 dims=("lat", "lon"),
                 coords={"lat": [35.0, 42.0], "lon": [100.0, 120.0]},
             )
-            x
-            y
+            a, b = xr.align(x, y)
+
+            # By default, an "inner join" is performed
+            # so "a" is a copy of "x" where coordinates match "y"
+            a
 
     Broadcasting
         A technique that allows operations to be performed on arrays with different shapes and dimensions.
         When such operations are performed, xarray will automatically attempt to broadcast the
         arrays to a common shape before the operation is applied.
 
-        .. ipython:: python
+        .. jupyter-execute::
 
             # 'a' has shape (3,) and 'b' has shape (4,)
             a = xr.DataArray(np.array([1, 2, 3]), dims=["x"])
@@ -175,7 +178,7 @@ complete examples, please consult the re
         the same dimensions. When merging, xarray aligns the variables and coordinates of the different datasets along
         the specified dimensions and creates a new ``Dataset`` containing all the variables and coordinates.
 
-        .. ipython:: python
+        .. jupyter-execute::
 
             # create two 1D arrays with names
             arr1 = xr.DataArray(
@@ -194,7 +197,7 @@ complete examples, please consult the re
         xarray arranges the datasets or dataarrays along a new dimension, and the resulting ``Dataset`` or ``DataArray``
         will have the same variables and coordinates along the other dimensions.
 
-        .. ipython:: python
+        .. jupyter-execute::
 
             a = xr.DataArray([[1, 2], [3, 4]], dims=("x", "y"))
             b = xr.DataArray([[5, 6], [7, 8]], dims=("x", "y"))
@@ -205,7 +208,7 @@ complete examples, please consult the re
         Combining is the process of arranging two or more DataArrays or Datasets into a single ``DataArray`` or
         ``Dataset`` using some combination of merging and concatenation operations.
 
-        .. ipython:: python
+        .. jupyter-execute::
 
             ds1 = xr.Dataset(
                 {"data": xr.DataArray([[1, 2], [3, 4]], dims=("x", "y"))},
@@ -217,7 +220,7 @@ complete examples, please consult the re
             )
 
             # combine the datasets
-            combined_ds = xr.combine_by_coords([ds1, ds2])
+            combined_ds = xr.combine_by_coords([ds1, ds2], join="outer")
             combined_ds
 
     lazy
diff -pruN 2025.03.1-8/doc/user-guide/testing.rst 2025.10.1-1/doc/user-guide/testing.rst
--- 2025.03.1-8/doc/user-guide/testing.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/user-guide/testing.rst	2025-10-10 10:38:05.000000000 +0000
@@ -3,8 +3,8 @@
 Testing your code
 =================
 
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
 
     import numpy as np
     import pandas as pd
@@ -55,7 +55,7 @@ These strategies are accessible in the :
 
 These build upon the numpy and array API strategies offered in :py:mod:`hypothesis.extra.numpy` and :py:mod:`hypothesis.extra.array_api`:
 
-.. ipython:: python
+.. jupyter-execute::
 
     import hypothesis.extra.numpy as npst
 
@@ -65,12 +65,18 @@ Generating Examples
 To see an example of what each of these strategies might produce, you can call one followed by the ``.example()`` method,
 which is a general hypothesis method valid for all strategies.
 
-.. ipython:: python
+.. jupyter-execute::
 
     import xarray.testing.strategies as xrst
 
     xrst.variables().example()
+
+.. jupyter-execute::
+
     xrst.variables().example()
+
+.. jupyter-execute::
+
     xrst.variables().example()
 
 You can see that calling ``.example()`` multiple times will generate different examples, giving you an idea of the wide
@@ -79,11 +85,11 @@ range of data that the xarray strategies
 In your tests, however, you should not use ``.example()``; instead, you should parameterize your tests with the
 :py:func:`hypothesis.given` decorator:
 
-.. ipython:: python
+.. jupyter-execute::
 
     from hypothesis import given
 
-.. ipython:: python
+.. jupyter-execute::
 
     @given(xrst.variables())
     def test_function_that_acts_on_variables(var):
@@ -96,7 +102,7 @@ Chaining Strategies
 Xarray's strategies can accept other strategies as arguments, allowing you to customise the contents of the generated
 examples.
 
-.. ipython:: python
+.. jupyter-execute::
 
     # generate a Variable containing an array with a complex number dtype, but all other details still arbitrary
     from hypothesis.extra.numpy import complex_number_dtypes
@@ -112,7 +118,7 @@ Fixing Arguments
 If you want to fix one aspect of the data structure, whilst allowing variation in the generated examples
 over all other aspects, then use :py:func:`hypothesis.strategies.just()`.
 
-.. ipython:: python
+.. jupyter-execute::
 
     import hypothesis.strategies as st
 
@@ -125,14 +131,14 @@ special strategy that just contains a si
 To fix the length of dimensions you can instead pass ``dims`` as a mapping of dimension names to lengths
 (i.e. following xarray objects' ``.sizes`` property), e.g.
 
-.. ipython:: python
+.. jupyter-execute::
 
     # Generates only variables with dimensions ["x", "y"], of lengths 2 & 3 respectively
     xrst.variables(dims=st.just({"x": 2, "y": 3})).example()
 
 You can also use this to specify that you want examples which are missing some part of the data structure, for instance
 
-.. ipython:: python
+.. jupyter-execute::
 
     # Generates a Variable with no attributes
     xrst.variables(attrs=st.just({})).example()
@@ -140,16 +146,20 @@ You can also use this to specify that yo
 Through a combination of chaining strategies and fixing arguments, you can specify quite complicated requirements on the
 objects your chained strategy will generate.
 
-.. ipython:: python
+.. jupyter-execute::
 
     fixed_x_variable_y_maybe_z = st.fixed_dictionaries(
         {"x": st.just(2), "y": st.integers(3, 4)}, optional={"z": st.just(2)}
     )
     fixed_x_variable_y_maybe_z.example()
 
-    special_variables = xrst.variables(dims=fixed_x_variable_y_maybe_z)
+.. jupyter-execute::
 
+    special_variables = xrst.variables(dims=fixed_x_variable_y_maybe_z)
     special_variables.example()
+
+.. jupyter-execute::
+
     special_variables.example()
 
 Here we have used one of hypothesis' built-in strategies :py:func:`hypothesis.strategies.fixed_dictionaries` to create a
@@ -171,27 +181,30 @@ Imagine we want to write a strategy whic
 1. Create an xarray object with numpy data and use hypothesis' ``.map()`` method to convert the underlying array to a
 different type:
 
-.. ipython:: python
+.. jupyter-execute::
 
     import sparse
 
-.. ipython:: python
+.. jupyter-execute::
 
     def convert_to_sparse(var):
         return var.copy(data=sparse.COO.from_numpy(var.to_numpy()))
 
-.. ipython:: python
+.. jupyter-execute::
 
     sparse_variables = xrst.variables(dims=xrst.dimension_names(min_dims=1)).map(
         convert_to_sparse
     )
 
     sparse_variables.example()
+
+.. jupyter-execute::
+
     sparse_variables.example()
 
 2. Pass a function which returns a strategy which generates the duck-typed arrays directly to the ``array_strategy_fn`` argument of the xarray strategies:
 
-.. ipython:: python
+.. jupyter-execute::
 
     def sparse_random_arrays(shape: tuple[int, ...]) -> sparse._coo.core.COO:
         """Strategy which generates random sparse.COO arrays"""
@@ -210,7 +223,7 @@ different type:
         return sparse_random_arrays(shape=shape)
 
 
-.. ipython:: python
+.. jupyter-execute::
 
     sparse_random_variables = xrst.variables(
         array_strategy_fn=sparse_random_arrays_fn, dtype=st.just(np.dtype("float64"))
@@ -238,7 +251,7 @@ If the array type you want to generate h
 (e.g. that which is conventionally imported as ``xp`` or similar),
 you can use this neat trick:
 
-.. ipython:: python
+.. jupyter-execute::
 
     import numpy as xp  # compatible in numpy 2.0
 
@@ -265,18 +278,24 @@ is useful.
 
 It works for lists of dimension names
 
-.. ipython:: python
+.. jupyter-execute::
 
     dims = ["x", "y", "z"]
     xrst.unique_subset_of(dims).example()
+
+.. jupyter-execute::
+
     xrst.unique_subset_of(dims).example()
 
 as well as for mappings of dimension names to sizes
 
-.. ipython:: python
+.. jupyter-execute::
 
     dim_sizes = {"x": 2, "y": 3, "z": 4}
     xrst.unique_subset_of(dim_sizes).example()
+
+.. jupyter-execute::
+
     xrst.unique_subset_of(dim_sizes).example()
 
 This is useful because operations like reductions can be performed over any subset of the xarray object's dimensions.
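+
+For example, here is a hedged sketch (not part of the upstream docs) of a property
+test that reduces over an arbitrary subset of a generated variable's dimensions:
+
+.. jupyter-execute::
+
+    @given(st.data(), xrst.variables(dims=xrst.dimension_names(min_dims=1)))
+    def test_mean_over_any_subset_of_dims(data, var):
+        # draw a random subset of the variable's dimensions to reduce over
+        reduce_dims = data.draw(xrst.unique_subset_of(var.dims))
+        result = var.mean(dim=reduce_dims)
+        assert set(result.dims) == set(var.dims) - set(reduce_dims)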
diff -pruN 2025.03.1-8/doc/user-guide/time-series.rst 2025.10.1-1/doc/user-guide/time-series.rst
--- 2025.03.1-8/doc/user-guide/time-series.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/user-guide/time-series.rst	2025-10-10 10:38:05.000000000 +0000
@@ -1,3 +1,5 @@
+.. currentmodule:: xarray
+
 .. _time-series:
 
 ================
 Accordingly, we've copied many of the features that make working with time-series
 data in pandas such a joy to xarray. In most cases, we rely on pandas for the
 core functionality.
 
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
 
     import numpy as np
     import pandas as pd
@@ -21,27 +23,29 @@ core functionality.
 Creating datetime64 data
 ------------------------
 
-Xarray uses the numpy dtypes ``datetime64[unit]`` and ``timedelta64[unit]``
-(where unit is one of ``"s"``, ``"ms"``, ``"us"`` and ``"ns"``) to represent datetime
+Xarray uses the numpy dtypes :py:class:`numpy.datetime64` and :py:class:`numpy.timedelta64`
+with specified units (one of ``"s"``, ``"ms"``, ``"us"`` and ``"ns"``) to represent datetime
 data, which offer vectorized operations with numpy and smooth integration with pandas.
 
-To convert to or create regular arrays of ``datetime64`` data, we recommend
-using :py:func:`pandas.to_datetime` and :py:func:`pandas.date_range`:
+To convert to or create regular arrays of :py:class:`numpy.datetime64` data, we recommend
+using :py:func:`pandas.to_datetime`, :py:class:`pandas.DatetimeIndex`, or :py:func:`xarray.date_range`:
 
-.. ipython:: python
+.. jupyter-execute::
 
     pd.to_datetime(["2000-01-01", "2000-02-02"])
+
+.. jupyter-execute::
+
     pd.DatetimeIndex(
         ["2000-01-01 00:00:00", "2000-02-02 00:00:00"], dtype="datetime64[s]"
     )
-    pd.date_range("2000-01-01", periods=365)
-    pd.date_range("2000-01-01", periods=365, unit="s")
-
-It is also possible to use corresponding :py:func:`xarray.date_range`:
 
-.. ipython:: python
+.. jupyter-execute::
 
     xr.date_range("2000-01-01", periods=365)
+
+.. jupyter-execute::
+
     xr.date_range("2000-01-01", periods=365, unit="s")
 
 
@@ -56,7 +60,7 @@ It is also possible to use corresponding
 Alternatively, you can supply arrays of Python ``datetime`` objects. These get
 converted automatically when used as arguments in xarray objects (with us-resolution):
 
-.. ipython:: python
+.. jupyter-execute::
 
     import datetime
 
@@ -81,20 +85,23 @@ attribute like ``'days since 2000-01-01'
 
 
 You can manually decode arrays in this form by passing a dataset to
-:py:func:`~xarray.decode_cf`:
+:py:func:`decode_cf`:
 
-.. ipython:: python
+.. jupyter-execute::
 
     attrs = {"units": "hours since 2000-01-01"}
     ds = xr.Dataset({"time": ("time", [0, 1, 2, 3], attrs)})
     # Default decoding to 'ns'-resolution
     xr.decode_cf(ds)
+
+.. jupyter-execute::
+
     # Decoding to 's'-resolution
     coder = xr.coders.CFDatetimeCoder(time_unit="s")
     xr.decode_cf(ds, decode_times=coder)
 
-From xarray 2025.01.2 the resolution of the dates can be one of ``"s"``, ``"ms"``, ``"us"`` or ``"ns"``. One limitation of using ``datetime64[ns]`` is that it limits the native representation of dates to those that fall between the years 1678 and 2262, which gets increased significantly with lower resolutions. When a store contains dates outside of these bounds (or dates < `1582-10-15`_ with a Gregorian, also known as standard, calendar), dates will be returned as arrays of :py:class:`cftime.datetime` objects and a :py:class:`~xarray.CFTimeIndex` will be used for indexing.
-:py:class:`~xarray.CFTimeIndex` enables most of the indexing functionality of a :py:class:`pandas.DatetimeIndex`.
+From xarray 2025.01.2 the resolution of the dates can be one of ``"s"``, ``"ms"``, ``"us"`` or ``"ns"``. One limitation of using ``datetime64[ns]`` is that it limits the native representation of dates to those that fall between the years 1678 and 2262, which gets increased significantly with lower resolutions. When a store contains dates outside of these bounds (or dates < `1582-10-15`_ with a Gregorian, also known as standard, calendar), dates will be returned as arrays of :py:class:`cftime.datetime` objects and a :py:class:`CFTimeIndex` will be used for indexing.
+:py:class:`CFTimeIndex` enables most of the indexing functionality of a :py:class:`pandas.DatetimeIndex`.
 See :ref:`CFTimeIndex` for more information.
 
 Datetime indexing
@@ -106,17 +113,20 @@ This allows for several useful and succi
 ``datetime64`` data. For example, we support indexing with strings for single
 items and with the ``slice`` object:
 
-.. ipython:: python
+.. jupyter-execute::
 
     time = pd.date_range("2000-01-01", freq="h", periods=365 * 24)
     ds = xr.Dataset({"foo": ("time", np.arange(365 * 24)), "time": time})
     ds.sel(time="2000-01")
+
+.. jupyter-execute::
+
     ds.sel(time=slice("2000-06-01", "2000-06-10"))
 
 You can also select a particular time by indexing with a
 :py:class:`datetime.time` object:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.sel(time=datetime.time(12))
 
@@ -132,11 +142,14 @@ given ``DataArray`` can be quickly compu
 
 .. _pandas accessors: https://pandas.pydata.org/pandas-docs/stable/basics.html#basics-dt-accessors
 
-.. ipython:: python
+.. jupyter-execute::
 
     time = pd.date_range("2000-01-01", freq="6h", periods=365 * 4)
     ds = xr.Dataset({"foo": ("time", np.arange(365 * 4)), "time": time})
     ds.time.dt.hour
+
+.. jupyter-execute::
+
     ds.time.dt.dayofweek
 
 The ``.dt`` accessor works on coordinate dimensions as well as
@@ -149,17 +162,23 @@ and "quarter":
 
 __ https://pandas.pydata.org/pandas-docs/stable/api.html#time-date-components
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds["time.month"]
+
+.. jupyter-execute::
+
     ds["time.dayofyear"]
 
 For use as a derived coordinate, xarray adds ``'season'`` to the list of
 datetime components supported by pandas:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds["time.season"]
+
+.. jupyter-execute::
+
     ds["time"].dt.season
 
 The set of valid seasons consists of 'DJF', 'MAM', 'JJA' and 'SON', labeled by
@@ -171,7 +190,7 @@ In addition, xarray supports rounding op
 
 __ https://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds["time"].dt.floor("D")
 
@@ -180,7 +199,7 @@ for arrays utilising the same formatting
 
 .. _datetime.strftime: https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds["time"].dt.strftime("%a, %b %d %H:%M")
 
@@ -190,13 +209,13 @@ Indexing Using Datetime Components
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 You can use the ``.dt`` accessor when subsetting your data as well. For example, we can subset for the month of January using the following:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.isel(time=(ds.time.dt.month == 1))
 
 You can also search for multiple months (in this case January through March), using ``isin``:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.isel(time=ds.time.dt.month.isin([1, 2, 3]))
 
@@ -205,42 +224,44 @@ You can also search for multiple months
 Resampling and grouped operations
 ---------------------------------
 
-Datetime components couple particularly well with grouped operations (see
-:ref:`groupby`) for analyzing features that repeat over time. Here's how to
-calculate the mean by time of day:
 
-.. ipython:: python
-    :okwarning:
+.. seealso::
+
+   For more generic documentation on grouping, see :ref:`groupby`.
+
+
+Datetime components couple particularly well with grouped operations for analyzing features that repeat over time.
+Here's how to calculate the mean by time of day:
+
+.. jupyter-execute::
 
     ds.groupby("time.hour").mean()
 
 For upsampling or downsampling temporal resolutions, xarray offers a
-:py:meth:`~xarray.Dataset.resample` method building on the core functionality
+:py:meth:`Dataset.resample` method building on the core functionality
 offered by the pandas method of the same name. Resample uses essentially the
-same api as ``resample`` `in pandas`_.
+same API as :py:meth:`pandas.DataFrame.resample` `in pandas`_.
 
 .. _in pandas: https://pandas.pydata.org/pandas-docs/stable/timeseries.html#up-and-downsampling
 
 For example, we can downsample our dataset from hourly to 6-hourly:
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     ds.resample(time="6h")
 
-This will create a specialized ``Resample`` object which saves information
-necessary for resampling. All of the reduction methods which work with
-``Resample`` objects can also be used for resampling:
+This will create a specialized :py:class:`~xarray.core.resample.DatasetResample` or :py:class:`~xarray.core.resample.DataArrayResample`
+object which saves information necessary for resampling. All of the reduction methods which work with
+:py:class:`Dataset` or :py:class:`DataArray` objects can also be used for resampling:
 
-.. ipython:: python
-    :okwarning:
+.. jupyter-execute::
 
     ds.resample(time="6h").mean()
 
 You can also supply an arbitrary reduction function to aggregate over each
 resampling group:
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.resample(time="6h").reduce(np.mean)
 
@@ -252,7 +273,7 @@ by specifying the ``dim`` keyword argume
     ds.resample(time="6h").mean(dim=["time", "latitude", "longitude"])
 
 For upsampling, xarray provides six methods: ``asfreq``, ``ffill``, ``bfill``, ``pad``,
-``nearest`` and ``interpolate``. ``interpolate`` extends ``scipy.interpolate.interp1d``
+``nearest`` and ``interpolate``. ``interpolate`` extends :py:func:`scipy.interpolate.interp1d`
 and supports all of its schemes. All of these resampling operations work on both
 Dataset and DataArray objects with an arbitrary number of dimensions.
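+
+As an illustrative sketch (not part of the upstream docs), we can upsample the
+6-hourly data back to an hourly frequency and interpolate linearly:
+
+.. jupyter-execute::
+
+    ds.resample(time="1h").interpolate("linear")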
 
@@ -260,22 +281,97 @@ In order to limit the scope of the metho
 ``nearest`` the ``tolerance`` argument can be set in coordinate units.
 Data with indices outside of the given ``tolerance`` are set to ``NaN``.
 
-.. ipython:: python
+.. jupyter-execute::
 
     ds.resample(time="1h").nearest(tolerance="1h")
 
 It is often desirable to center the time values after a resampling operation.
 That can be accomplished by updating the resampled dataset time coordinate values
-using time offset arithmetic via the `pandas.tseries.frequencies.to_offset`_ function.
-
-.. _pandas.tseries.frequencies.to_offset: https://pandas.pydata.org/docs/reference/api/pandas.tseries.frequencies.to_offset.html
+using time offset arithmetic via the :py:func:`pandas.tseries.frequencies.to_offset` function.
 
-.. ipython:: python
+.. jupyter-execute::
 
     resampled_ds = ds.resample(time="6h").mean()
     offset = pd.tseries.frequencies.to_offset("6h") / 2
     resampled_ds["time"] = resampled_ds.get_index("time") + offset
     resampled_ds
 
-For more examples of using grouped operations on a time dimension, see
-:doc:`../examples/weather-data`.
+
+.. seealso::
+
+   For more examples of using grouped operations on a time dimension, see :doc:`../examples/weather-data`.
+
+
+.. _seasonal_grouping:
+
+Handling Seasons
+~~~~~~~~~~~~~~~~
+
+Two extremely common time series operations are to group by seasons and to resample to a seasonal frequency.
+Xarray has historically supported some simple versions of these computations.
+For example, you can group with ``.groupby("time.season")`` (where the seasons are DJF, MAM, JJA, SON)
+and resample to a seasonal frequency using pandas syntax: ``.resample(time="QS-DEC")``.
+
+Quite commonly one wants more flexibility in defining seasons. For these use-cases, Xarray provides
+:py:class:`groupers.SeasonGrouper` and :py:class:`groupers.SeasonResampler`.
+
+
+.. currentmodule:: xarray.groupers
+
+.. jupyter-execute::
+
+    from xarray.groupers import SeasonGrouper
+
+    ds.groupby(time=SeasonGrouper(["DJF", "MAM", "JJA", "SON"])).mean()
+
+
+Note how the seasons are in the specified order, unlike ``.groupby("time.season")`` where the
+seasons are sorted alphabetically.
+
+.. jupyter-execute::
+
+    ds.groupby("time.season").mean()
+
+
+:py:class:`SeasonGrouper` supports overlapping seasons:
+
+.. jupyter-execute::
+
+    ds.groupby(time=SeasonGrouper(["DJFM", "MAMJ", "JJAS", "SOND"])).mean()
+
+
+Skipping months is allowed:
+
+.. jupyter-execute::
+
+    ds.groupby(time=SeasonGrouper(["JJAS"])).mean()
+
+
+Use :py:class:`SeasonResampler` to specify custom seasons.
+
+.. jupyter-execute::
+
+    from xarray.groupers import SeasonResampler
+
+    ds.resample(time=SeasonResampler(["DJF", "MAM", "JJA", "SON"])).mean()
+
+
+:py:class:`SeasonResampler` is smart enough to correctly handle years for seasons that
+span the end of the year (e.g. DJF). By default :py:class:`SeasonResampler` will skip any
+season that is incomplete (e.g. the first DJF season for a time series that starts in Jan).
+Pass the ``drop_incomplete=False`` kwarg to :py:class:`SeasonResampler` to disable this behaviour.
+
+.. jupyter-execute::
+
+    from xarray.groupers import SeasonResampler
+
+    ds.resample(
+        time=SeasonResampler(["DJF", "MAM", "JJA", "SON"], drop_incomplete=False)
+    ).mean()
+
+
+Seasons need not be of the same length:
+
+.. jupyter-execute::
+
+    ds.resample(time=SeasonResampler(["JF", "MAM", "JJAS", "OND"])).mean()
diff -pruN 2025.03.1-8/doc/user-guide/weather-climate.rst 2025.10.1-1/doc/user-guide/weather-climate.rst
--- 2025.03.1-8/doc/user-guide/weather-climate.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/user-guide/weather-climate.rst	2025-10-10 10:38:05.000000000 +0000
@@ -5,10 +5,11 @@
 Weather and climate data
 ========================
 
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
 
     import xarray as xr
+    import numpy as np
 
 Xarray can leverage metadata that follows the `Climate and Forecast (CF) conventions`_ if present. Examples include :ref:`automatic labelling of plots<plotting>` with descriptive names and units if proper metadata is present and support for non-standard calendars used in climate science through the ``cftime`` module (explained in the :ref:`CFTimeIndex` section). There are also a number of :ref:`geosciences-focused projects that build on xarray<ecosystem>`.
 
@@ -53,7 +54,7 @@ CF-compliant coordinate variables
 
 .. _`MetPy`: https://unidata.github.io/MetPy/dev/index.html
 .. _`metpy documentation`:	https://unidata.github.io/MetPy/dev/tutorials/xarray_tutorial.html#coordinates
-.. _`Cartopy`: https://scitools.org.uk/cartopy/docs/latest/crs/projections.html
+.. _`Cartopy`: https://cartopy.readthedocs.io/stable/reference/crs.html
 
 .. _CFTimeIndex:
 
@@ -87,7 +88,7 @@ For example, you can create a DataArray
 coordinate with dates from a no-leap calendar and a
 :py:class:`~xarray.CFTimeIndex` will automatically be used:
 
-.. ipython:: python
+.. jupyter-execute::
 
     from itertools import product
     from cftime import DatetimeNoLeap
@@ -105,7 +106,7 @@ instance, we can create the same dates a
 :py:class:`~xarray.CFTimeIndex` for non-standard calendars, but can be nice
 to use to be explicit):
 
-.. ipython:: python
+.. jupyter-execute::
 
     dates = xr.date_range(
         start="0001", periods=24, freq="MS", calendar="noleap", use_cftime=True
@@ -117,7 +118,7 @@ infer the sampling frequency of a :py:cl
 :py:class:`~xarray.DataArray` containing cftime objects. It also works transparently with
 ``np.datetime64`` and ``np.timedelta64`` data (with "s", "ms", "us" or "ns" resolution).
 
-.. ipython:: python
+.. jupyter-execute::
 
     xr.infer_freq(dates)
 
@@ -128,9 +129,12 @@ using the same formatting as the standar
 
 .. _datetime.strftime: https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior
 
-.. ipython:: python
+.. jupyter-execute::
 
     dates.strftime("%c")
+
+.. jupyter-execute::
+
     da["time"].dt.strftime("%Y%m%d")
 
 Conversion between non-standard calendar and to/from pandas DatetimeIndexes is
@@ -141,7 +145,7 @@ use ``pandas`` when possible, i.e. when
 
 .. _1582-10-15: https://en.wikipedia.org/wiki/Gregorian_calendar
 
-.. ipython:: python
+.. jupyter-execute::
 
     dates = xr.date_range(
         start="2001", periods=24, freq="MS", calendar="noleap", use_cftime=True
@@ -158,9 +162,12 @@ For data indexed by a :py:class:`~xarray
 
 - `Partial datetime string indexing`_:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da.sel(time="0001")
+
+.. jupyter-execute::
+
     da.sel(time=slice("0001-05", "0002-02"))
 
 .. note::
@@ -180,59 +187,83 @@ For data indexed by a :py:class:`~xarray
   "season", "dayofyear", "dayofweek", and "days_in_month") with the addition
   of "calendar", absent from pandas:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da.time.dt.year
+
+.. jupyter-execute::
+
     da.time.dt.month
+
+.. jupyter-execute::
+
     da.time.dt.season
+
+.. jupyter-execute::
+
     da.time.dt.dayofyear
+
+.. jupyter-execute::
+
     da.time.dt.dayofweek
+
+.. jupyter-execute::
+
     da.time.dt.days_in_month
+
+.. jupyter-execute::
+
     da.time.dt.calendar
 
 - Rounding of datetimes to fixed frequencies via the ``dt`` accessor:
 
-.. ipython:: python
+.. jupyter-execute::
+
+    da.time.dt.ceil("3D").head()
+
+.. jupyter-execute::
+
+    da.time.dt.floor("5D").head()
+
+.. jupyter-execute::
 
-    da.time.dt.ceil("3D")
-    da.time.dt.floor("5D")
-    da.time.dt.round("2D")
+    da.time.dt.round("2D").head()
 
 - Group-by operations based on datetime accessor attributes (e.g. by month of
   the year):
 
-.. ipython:: python
+.. jupyter-execute::
 
     da.groupby("time.month").sum()
 
 - Interpolation using :py:class:`cftime.datetime` objects:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da.interp(time=[DatetimeNoLeap(1, 1, 15), DatetimeNoLeap(1, 2, 15)])
 
 - Interpolation using datetime strings:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da.interp(time=["0001-01-15", "0001-02-15"])
 
 - Differentiation:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da.differentiate("time")
 
 - Serialization:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da.to_netcdf("example-no-leap.nc")
     reopened = xr.open_dataset("example-no-leap.nc")
     reopened
 
-.. ipython:: python
-    :suppress:
+.. jupyter-execute::
+    :hide-code:
 
     import os
 
@@ -241,7 +272,7 @@ For data indexed by a :py:class:`~xarray
 
 - And resampling along the time dimension for data indexed by a :py:class:`~xarray.CFTimeIndex`:
 
-.. ipython:: python
+.. jupyter-execute::
 
     da.resample(time="81min", closed="right", label="right", offset="3min").mean()
 
diff -pruN 2025.03.1-8/doc/whats-new.rst 2025.10.1-1/doc/whats-new.rst
--- 2025.03.1-8/doc/whats-new.rst	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/doc/whats-new.rst	2025-10-10 10:38:05.000000000 +0000
@@ -1,18 +1,680 @@
 .. currentmodule:: xarray
 
+.. _whats-new:
+
 What's New
 ==========
 
-.. ipython:: python
-    :suppress:
+.. _whats-new.2025.10.1:
 
-    import numpy as np
-    import pandas as pd
-    import xarray as xray
-    import xarray
-    import xarray as xr
+v2025.10.1 (October 7, 2025)
+----------------------------
+
+This release reverts a breaking change to Xarray's preferred netCDF backend.
+
+Breaking changes
+~~~~~~~~~~~~~~~~
+
+- Xarray's default engine for reading/writing netCDF files has been reverted to
+  prefer netCDF4 over h5netcdf over scipy, which was the default before
+  v2025.09.1. This change had larger implications for the ecosystem than we
+  anticipated. We are still considering changing the default in the future, but
+  we will be a bit more careful about the implications. See :issue:`10657` and
+  linked issues for discussion. The behavior can still be customized, e.g., with
+  ``xr.set_options(netcdf_engine_order=['h5netcdf', 'netcdf4', 'scipy'])``.
+  By `Stephan Hoyer <https://github.com/shoyer>`_.
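+
+A minimal sketch of overriding the restored default, assuming the h5netcdf
+library is installed (``set_options`` also works as a context manager, so the
+override can be scoped to a single read or write):
+
+.. code:: python
+
+    import xarray as xr
+
+    ds = xr.Dataset({"a": ("x", [1, 2, 3])})
+
+    # prefer h5netcdf for this write only; the other engines are tried as fallbacks
+    with xr.set_options(netcdf_engine_order=["h5netcdf", "netcdf4", "scipy"]):
+        ds.to_netcdf("example.nc")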
+
+New features
+~~~~~~~~~~~~
+
+- Coordinates are ordered to match dims when displaying Xarray objects. (:pull:`10778`).
+  By `Julia Signell <https://github.com/jsignell>`_.
+
+Bug fixes
+~~~~~~~~~
+
+- Fix error raised when writing scalar variables to Zarr with ``region={}``
+  (:pull:`10796`).
+  By `Stephan Hoyer <https://github.com/shoyer>`_.
+
+
+.. _whats-new.2025.09.1:
+
+v2025.09.1 (September 29, 2025)
+-------------------------------
+
+This release contains improvements to netCDF IO and the
+:py:func:`DataTree.from_dict` constructor, as well as a variety of bug fixes.
+In particular, the default netCDF backend has switched from netCDF4 to h5netcdf,
+which is typically faster.
+
+Thanks to the 17 contributors to this release:
+Claude, Deepak Cherian, Dimitri Papadopoulos Orfanos, Dylan H. Morris, Emmanuel Mathot, Ian Hunt-Isaak, Joren Hammudoglu, Julia Signell, Justus Magin, Maximilian Roos, Nick Hodgskin, Spencer Clark, Stephan Hoyer, Tom Nicholas, gronniger, joseph nowak and pierre-manchon
+
+New Features
+~~~~~~~~~~~~
+
+- :py:func:`DataTree.from_dict` now supports passing in ``DataArray`` and nested
+  dictionary values, and has a ``coords`` argument for specifying coordinates as
+  ``DataArray`` objects (:pull:`10658`).
+- ``engine='netcdf4'`` now supports reading and writing in-memory netCDF files.
+  All of Xarray's netCDF backends now support in-memory reads and writes
+  (:pull:`10624`).
+  By `Stephan Hoyer <https://github.com/shoyer>`_.
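+
+A rough sketch of an in-memory round trip with the netCDF4 engine, assuming the
+netCDF4-python library is installed and that ``open_dataset`` accepts the
+serialized bytes directly (``to_netcdf()`` without a path returns a
+``memoryview``; see the v2025.09.0 breaking changes below):
+
+.. code:: python
+
+    import xarray as xr
+
+    ds = xr.Dataset({"a": ("x", [1, 2, 3])})
+
+    buf = ds.to_netcdf(engine="netcdf4")  # serialize to memory, no file on disk
+    roundtripped = xr.open_dataset(bytes(buf), engine="netcdf4")  # read it back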
+
+Breaking changes
+~~~~~~~~~~~~~~~~
+
+- :py:meth:`Dataset.update` now returns ``None``, instead of the updated dataset. This
+  completes the deprecation cycle started in version 0.17. The method still updates the
+  dataset in-place. (:issue:`10167`)
+  By `Maximilian Roos <https://github.com/max-sixty>`_.
+
+- The default ``engine`` when reading/writing netCDF files is now h5netcdf
+  or scipy, which are typically faster than the prior default of netCDF4-python.
+  You can control this default behavior explicitly via the new
+  ``netcdf_engine_order`` parameter in :py:func:`~xarray.set_options`, e.g.,
+  ``xr.set_options(netcdf_engine_order=['netcdf4', 'scipy', 'h5netcdf'])`` to
+  restore the prior defaults (:issue:`10657`).
+  By `Stephan Hoyer <https://github.com/shoyer>`_.
+
+- The HTML reprs for :py:class:`DataArray`, :py:class:`Dataset` and
+  :py:class:`DataTree` have been tweaked to hide empty sections, consistent
+  with the text reprs. The ``DataTree`` HTML repr also now automatically expands
+  sub-groups (:pull:`10785`).
+  By `Stephan Hoyer <https://github.com/shoyer>`_.
+
+- Zarr stores written with Xarray now consistently use a default Zarr fill value
+  of ``NaN`` for float variables, for both Zarr v2 and v3 (:issue:`10646`). All
+  other dtypes still use the Zarr default ``fill_value`` of zero. To customize,
+  explicitly set encoding in :py:meth:`~Dataset.to_zarr`, e.g.,
+  ``encoding=dict.fromkeys(ds.data_vars, {'fill_value': 0})``.
+  By `Stephan Hoyer <https://github.com/shoyer>`_.
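+
+A minimal sketch of opting out of the new ``NaN`` default for a float variable,
+assuming the zarr library is installed (the encoding dict mirrors the snippet
+above):
+
+.. code:: python
+
+    import numpy as np
+    import xarray as xr
+
+    ds = xr.Dataset({"a": ("x", np.arange(3.0))})
+
+    # set an explicit per-variable fill_value instead of the NaN default
+    ds.to_zarr(
+        "example.zarr",
+        mode="w",
+        encoding=dict.fromkeys(ds.data_vars, {"fill_value": 0}),
+    )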
+
+Deprecations
+~~~~~~~~~~~~
+
+
+Bug fixes
+~~~~~~~~~
+
+- Xarray objects opened from file-like objects with ``engine='h5netcdf'`` can
+  now be pickled, as long as the underlying file-like object also supports
+  pickle (:issue:`10712`).
+  By `Stephan Hoyer <https://github.com/shoyer>`_.
+
+- Closing Xarray objects opened from file-like objects with ``engine='scipy'``
+  no longer closes the underlying file, consistent with the h5netcdf backend
+  (:pull:`10624`).
+  By `Stephan Hoyer <https://github.com/shoyer>`_.
+
+- Fix the ``align_chunks`` parameter on the :py:meth:`~xarray.Dataset.to_zarr` method; it was not being
+  passed to the underlying :py:meth:`~xarray.backends.api` method (:issue:`10501`, :pull:`10516`).
+- Fix error when encoding an empty :py:class:`numpy.datetime64` array
+  (:issue:`10722`, :pull:`10723`). By `Spencer Clark
+  <https://github.com/spencerkclark>`_.
+- Fix error from ``to_netcdf(..., compute=False)`` when using Dask Distributed
+  (:issue:`10725`).
+  By `Stephan Hoyer <https://github.com/shoyer>`_.
+- Propagate coordinate attrs in :py:meth:`xarray.Dataset.map` (:issue:`9317`, :pull:`10602`).
+  By `Justus Magin <https://github.com/keewis>`_.
+- Allow ``combine_attrs="drop_conflicts"`` to handle objects with ``__eq__`` methods that return
+  non-bool values (e.g., numpy arrays) without raising ``ValueError`` (:pull:`10726`).
+  By `Maximilian Roos <https://github.com/max-sixty>`_.
+
+Documentation
+~~~~~~~~~~~~~
+
+- Fixed Zarr encoding documentation with consistent examples and added comprehensive
+  coverage of dimension and coordinate encoding differences between Zarr V2 and V3 formats.
+  The documentation shows what users will see when accessing Zarr files
+  with raw zarr-python, and explains the relationship between ``_ARRAY_DIMENSIONS``
+  (Zarr V2), ``dimension_names`` metadata (Zarr V3), and CF ``coordinates`` attributes.
+  (:pull:`10720`)
+  By `Emmanuel Mathot <https://github.com/emmanuelmathot>`_.
+
+Internal Changes
+~~~~~~~~~~~~~~~~
+
+- Refactor structure of ``backends`` module to separate code for reading data from code for writing data (:pull:`10771`).
+  By `Tom Nicholas <https://github.com/TomNicholas>`_.
+- All test files now have full mypy type checking enabled (``check_untyped_defs = true``),
+  improving type safety and making the test suite a better reference for type annotations.
+  (:pull:`10768`)
+  By `Maximilian Roos <https://github.com/max-sixty>`_.
+
+.. _whats-new.2025.09.0:
+
+v2025.09.0 (September 2, 2025)
+------------------------------
+
+This release brings a number of small improvements and fixes, especially related
+to writing DataTree objects and netCDF files to disk.
+
+Thanks to the 13 contributors to this release:
+Benoit Bovy, DHRUVA KUMAR KAUSHAL, Deepak Cherian, Dhruva Kumar Kaushal, Giacomo Caria, Ian Hunt-Isaak, Illviljan, Justus Magin, Kai Mühlbauer, Ruth Comer, Spencer Clark, Stephan Hoyer and Tom Nicholas
+
+New Features
+~~~~~~~~~~~~
+- Support rechunking by :py:class:`~xarray.groupers.SeasonResampler` for seasonal data analysis (:issue:`10425`, :pull:`10519`).
+  By `Dhruva Kumar Kaushal <https://github.com/dhruvak001>`_.
+- Add convenience methods to :py:class:`~xarray.Coordinates` (:pull:`10318`).
+  By `Justus Magin <https://github.com/keewis>`_.
+- Added :py:func:`load_datatree` for loading ``DataTree`` objects into memory
+  from disk. It has the same relationship to :py:func:`open_datatree` as
+  :py:func:`load_dataset` has to :py:func:`open_dataset`.
+  By `Stephan Hoyer <https://github.com/shoyer>`_.
+- ``compute=False`` is now supported by :py:meth:`DataTree.to_netcdf` and
+  :py:meth:`DataTree.to_zarr`.
+  By `Stephan Hoyer <https://github.com/shoyer>`_.
+- ``open_dataset`` will now correctly infer a path ending in ``.zarr/`` as zarr.
+  By `Ian Hunt-Isaak <https://github.com/ianhi>`_.
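+
+A minimal sketch of the trailing-slash case, assuming the zarr library is
+installed:
+
+.. code:: python
+
+    import xarray as xr
+
+    xr.Dataset({"a": ("x", [1, 2, 3])}).to_zarr("example.zarr", mode="w")
+
+    # the trailing slash no longer prevents the zarr engine from being inferred
+    ds = xr.open_dataset("example.zarr/")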
+
+Breaking changes
+~~~~~~~~~~~~~~~~
+- Following pandas 3.0 (`pandas-dev/pandas#61985
+  <https://github.com/pandas-dev/pandas/pull/61985>`_), ``Day`` is no longer
+  considered a ``Tick``-like frequency. Therefore non-``None`` values of
+  ``offset`` and non-``"start_day"`` values of ``origin`` will have no effect
+  when resampling to a daily frequency for objects indexed by a
+  :py:class:`xarray.CFTimeIndex`. As in `pandas-dev/pandas#62101
+  <https://github.com/pandas-dev/pandas/pull/62101>`_ warnings will be emitted
+  if non-default values are provided in this context (:issue:`10640`,
+  :pull:`10650`). By `Spencer Clark <https://github.com/spencerkclark>`_.
+
+- The default backend ``engine`` used by :py:meth:`Dataset.to_netcdf`
+  and :py:meth:`DataTree.to_netcdf` is now chosen consistently with
+  :py:func:`open_dataset` and :py:func:`open_datatree`, using whichever netCDF
+  libraries are available and valid, and preferring netCDF4 to h5netcdf to scipy
+  (:issue:`10654`). This will change the default backend in some edge cases
+  (e.g., from scipy to netCDF4 when writing to a file-like object or bytes). To
+  override these new defaults, set ``engine`` explicitly.
+  By `Stephan Hoyer <https://github.com/shoyer>`_.
+- The return value of :py:meth:`Dataset.to_netcdf` without ``path`` is now a
+  ``memoryview`` object instead of ``bytes`` (:pull:`10656`). This removes an
+  unnecessary memory copy and ensures consistency when using either
+  ``engine="scipy"`` or ``engine="h5netcdf"``. If you need a bytes object,
+  simply wrap the return value of ``to_netcdf()`` with ``bytes()``.
+  By `Stephan Hoyer <https://github.com/shoyer>`_.
 
-    np.random.seed(123456)
+Bug fixes
+~~~~~~~~~
+- Fix contour plots not normalizing the colors correctly when using, for example, logarithmic norms. (:issue:`10551`, :pull:`10565`)
+  By `Jimmy Westling <https://github.com/illviljan>`_.
+- Fix distribution of ``auto_complex`` keyword argument for open_datatree (:issue:`10631`, :pull:`10632`).
+  By `Kai Mühlbauer <https://github.com/kmuehlbauer>`_.
+- Warn instead of raise in case of misconfiguration of ``unlimited_dims`` originating from dataset.encoding, to prevent breaking users' workflows (:issue:`10647`, :pull:`10648`).
+  By `Kai Mühlbauer <https://github.com/kmuehlbauer>`_.
+
+- :py:meth:`DataTree.to_netcdf` and :py:meth:`DataTree.to_zarr` now avoid
+  redundant computation of Dask arrays with cross-group dependencies
+  (:issue:`10637`).
+  By `Stephan Hoyer <https://github.com/shoyer>`_.
+- :py:meth:`DataTree.to_netcdf` no longer has h5netcdf hard-coded as the default engine
+  (:issue:`10654`).
+  By `Stephan Hoyer <https://github.com/shoyer>`_.
+
+Internal Changes
+~~~~~~~~~~~~~~~~
+- Run ``TestNetCDF4Data`` as ``TestNetCDF4DataTree`` through ``open_datatree`` (:pull:`10632`).
+  By `Kai Mühlbauer <https://github.com/kmuehlbauer>`_.
+
+
+.. _whats-new.2025.08.0:
+
+v2025.08.0 (August 14, 2025)
+----------------------------
+
+This release brings the ability to load xarray objects asynchronously and write netCDF as bytes, fixes a number of bugs, and starts an important deprecation cycle for changing the default values of keyword arguments for various xarray combining functions.
+
+Thanks to the 24 contributors to this release:
+Alfonso Ladino, Brigitta Sipőcz, Claude, Deepak Cherian, Dimitri Papadopoulos Orfanos, Eric Jansen, Ian Hunt-Isaak, Ilan Gold, Illviljan, Julia Signell, Justus Magin, Kai Mühlbauer, Mathias Hauser, Matthew, Michael Niklas, Miguel Jimenez, Nick Hodgskin, Pratiman, Scott Staniewicz, Spencer Clark, Stephan Hoyer, Tom Nicholas, Yang Yang and jemmajeffree
+
+New Features
+~~~~~~~~~~~~
+
+- Added :py:meth:`DataTree.prune` method to remove empty nodes while preserving tree structure.
+  Useful for cleaning up DataTree after time-based filtering operations (:issue:`10590`, :pull:`10598`).
+  By `Alfonso Ladino <https://github.com/aladinor>`_.
+- Added new asynchronous loading methods :py:meth:`Dataset.load_async`, :py:meth:`DataArray.load_async`, :py:meth:`Variable.load_async`.
+  Note that users are expected to limit concurrency themselves - xarray does not internally limit concurrency in any way.
+  (:issue:`10326`, :pull:`10327`) By `Tom Nicholas <https://github.com/TomNicholas>`_.
+- :py:meth:`DataTree.to_netcdf` can now write to a file-like object, or return bytes if called without a filepath. (:issue:`10570`)
+  By `Matthew Willson <https://github.com/mjwillson>`_.
+- Added exception handling for invalid files in :py:func:`open_mfdataset`. (:issue:`6736`)
+  By `Pratiman Patel <https://github.com/pratiman-91>`_.
+
+Breaking changes
+~~~~~~~~~~~~~~~~
+
+- When writing to NetCDF files with groups, Xarray no longer redefines dimensions
+  that have the same size in parent groups (:issue:`10241`). This conforms with
+  `CF Conventions for group scope <https://cfconventions.org/cf-conventions/cf-conventions.html#_scope>`_
+  but may require adjustments for code that consumes NetCDF files produced by Xarray.
+  By `Stephan Hoyer <https://github.com/shoyer>`_.
+
+Deprecations
+~~~~~~~~~~~~
+
+- Start a deprecation cycle for changing the default keyword arguments to :py:func:`concat`, :py:func:`merge`,
+  :py:func:`combine_nested`, :py:func:`combine_by_coords`, and :py:func:`open_mfdataset`.
+  Emits a :py:class:`FutureWarning` when the old defaults are used and the new defaults would result in different behavior.
+  Adds an option: ``use_new_combine_kwarg_defaults`` to opt in to new defaults immediately.
+  New values are:
+
+  - ``data_vars``: ``None``, which means ``all`` when concatenating along a new dimension, and ``"minimal"`` when concatenating along an existing dimension
+  - ``coords``: "minimal"
+  - ``compat``: "override"
+  - ``join``: "exact"
+
+  (:issue:`8778`, :issue:`1385`, :pull:`10062`). By `Julia Signell <https://github.com/jsignell>`_.
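+
+A minimal sketch of opting in to the new defaults right away, assuming the
+``use_new_combine_kwarg_defaults`` option is set through
+:py:func:`~xarray.set_options` like other options:
+
+.. code:: python
+
+    import xarray as xr
+
+    ds1 = xr.Dataset({"a": ("x", [1, 2])}, coords={"x": [0, 1]})
+    ds2 = xr.Dataset({"a": ("x", [3, 4])}, coords={"x": [2, 3]})
+
+    # use the new defaults now and avoid the FutureWarning
+    with xr.set_options(use_new_combine_kwarg_defaults=True):
+        combined = xr.concat([ds1, ds2], dim="x")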
+
+Bug fixes
+~~~~~~~~~
+
+- Fix Pydap Datatree backend testing. Testing now compares the elements of two (unordered) sets rather than lists (:pull:`10525`).
+  By `Miguel Jimenez-Urias <https://github.com/Mikejmnez>`_.
+- Fix ``KeyError`` when passing a ``dim`` argument different from the default to ``convert_calendar`` (:pull:`10544`).
+  By `Eric Jansen <https://github.com/ej81>`_.
+- Fix transpose of boolean arrays read from disk. (:issue:`10536`)
+  By `Deepak Cherian <https://github.com/dcherian>`_.
+- Fix detection of the ``h5netcdf`` backend. Xarray now selects ``h5netcdf`` if the default ``netCDF4`` engine is not available (:issue:`10401`, :pull:`10557`).
+  By `Scott Staniewicz <https://github.com/scottstanie>`_.
+- Fix :py:func:`merge` to prevent altering the original object depending on the ``join`` value (:pull:`10596`).
+  By `Julia Signell <https://github.com/jsignell>`_.
+- Ensure ``unlimited_dims`` passed to :py:meth:`xarray.DataArray.to_netcdf`, :py:meth:`xarray.Dataset.to_netcdf` or :py:meth:`xarray.DataTree.to_netcdf` only contains dimensions present in the object; raise ``ValueError`` otherwise (:issue:`10549`, :pull:`10608`).
+  By `Kai Mühlbauer <https://github.com/kmuehlbauer>`_.
+
+Documentation
+~~~~~~~~~~~~~
+
+- Clarify lazy behaviour and eager loading for ``chunks=None`` in :py:func:`~xarray.open_dataset`, :py:func:`~xarray.open_dataarray`, :py:func:`~xarray.open_datatree`, :py:func:`~xarray.open_groups` and :py:func:`~xarray.open_zarr` (:issue:`10612`, :pull:`10627`).
+  By `Kai Mühlbauer <https://github.com/kmuehlbauer>`_.
+
+Performance
+~~~~~~~~~~~
+
+- Speed up handling of non-numeric scalars when calling :py:meth:`Dataset.interp`. (:issue:`10054`, :pull:`10554`)
+  By `Jimmy Westling <https://github.com/illviljan>`_.
+
+.. _whats-new.2025.07.1:
+
+v2025.07.1 (July 09, 2025)
+--------------------------
+
+This release brings a lot of improvements to flexible indexes functionality, including new classes
+to ease building of new indexes with custom coordinate transforms (:py:class:`indexes.CoordinateTransformIndex`)
+and tree-like index structures (:py:class:`indexes.NDPointIndex`).
+See a `new gallery <https://xarray-indexes.readthedocs.io>`_ showing off the possibilities enabled by flexible indexes.
+
+Thanks to the 7 contributors to this release:
+Benoit Bovy, Deepak Cherian, Dhruva Kumar Kaushal, Dimitri Papadopoulos Orfanos, Illviljan, Justus Magin and Tom Nicholas
+
+New Features
+~~~~~~~~~~~~
+- New :py:class:`xarray.indexes.NDPointIndex`, which by default uses :py:class:`scipy.spatial.KDTree` under the hood for
+  the selection of irregular, n-dimensional data (:pull:`10478`).
+  By `Benoit Bovy <https://github.com/benbovy>`_.
+- Allow skipping the creation of default indexes when opening datasets (:pull:`8051`).
+  By `Benoit Bovy <https://github.com/benbovy>`_ and `Justus Magin <https://github.com/keewis>`_.
+
+Bug fixes
+~~~~~~~~~
+
+- :py:meth:`Dataset.set_xindex` now raises a helpful error when a custom index
+  creates extra variables that don't match the provided coordinate names, instead
+  of silently ignoring them. The error message suggests using the factory method
+  pattern with :py:meth:`xarray.Coordinates.from_xindex` and
+  :py:meth:`Dataset.assign_coords` for advanced use cases (:issue:`10499`, :pull:`10503`).
+  By `Dhruva Kumar Kaushal <https://github.com/dhruvak001>`_.
+
+Documentation
+~~~~~~~~~~~~~
+- A `new gallery <https://xarray-indexes.readthedocs.io>`_ showing off the possibilities enabled by flexible indexes.
+
+Internal Changes
+~~~~~~~~~~~~~~~~
+
+- Refactored the ``PandasIndexingAdapter`` and
+  ``CoordinateTransformIndexingAdapter`` internal indexing classes. Coordinate
+  variables that wrap a :py:class:`pandas.RangeIndex`, a
+  :py:class:`pandas.MultiIndex` or a
+  :py:class:`xarray.indexes.CoordinateTransform` are now displayed as lazy variables
+  in the Xarray data reprs (:pull:`10355`).
+  By `Benoit Bovy <https://github.com/benbovy>`_.
+
+.. _whats-new.2025.07.0:
+
+v2025.07.0 (Jul 3, 2025)
+------------------------
+
+This release extends xarray's support for custom index classes, restores support for reading netCDF3 files with SciPy, updates minimum dependencies, and fixes a number of bugs.
+
+Thanks to the 17 contributors to this release:
+Bas Nijholt, Benoit Bovy, Deepak Cherian, Dhruva Kumar Kaushal, Dimitri Papadopoulos Orfanos, Ian Hunt-Isaak, Kai Mühlbauer, Mathias Hauser, Maximilian Roos, Miguel Jimenez, Nick Hodgskin, Scott Henderson, Shuhao Cao, Spencer Clark, Stephan Hoyer, Tom Nicholas and Zsolt Cserna
+
+New Features
+~~~~~~~~~~~~
+
+- Expose :py:class:`~xarray.indexes.RangeIndex` and :py:class:`~xarray.indexes.CoordinateTransformIndex` as public API
+  under the ``xarray.indexes`` namespace. By `Deepak Cherian <https://github.com/dcherian>`_.
+- Support zarr-python's new ``.supports_consolidated_metadata`` store property (:pull:`10457`).
+  By `Tom Nicholas <https://github.com/TomNicholas>`_.
+- Better error messages when encoding data to be written to disk fails (:pull:`10464`).
+  By `Stephan Hoyer <https://github.com/shoyer>`_.
+
+Breaking changes
+~~~~~~~~~~~~~~~~
+
+The minimum versions of some dependencies were changed (:issue:`10417`, :pull:`10438`):
+By `Dhruva Kumar Kaushal <https://github.com/dhruvak001>`_.
+
+.. list-table::
+   :header-rows: 1
+   :widths: 30 20 20
+
+   * - Dependency
+     - Old Version
+     - New Version
+   * - Python
+     - 3.10
+     - 3.11
+   * - array-api-strict
+     - 1.0
+     - 1.1
+   * - boto3
+     - 1.29
+     - 1.34
+   * - bottleneck
+     - 1.3
+     - 1.4
+   * - cartopy
+     - 0.22
+     - 0.23
+   * - dask-core
+     - 2023.11
+     - 2024.6
+   * - distributed
+     - 2023.11
+     - 2024.6
+   * - flox
+     - 0.7
+     - 0.9
+   * - h5py
+     - 3.8
+     - 3.11
+   * - hdf5
+     - 1.12
+     - 1.14
+   * - iris
+     - 3.7
+     - 3.9
+   * - lxml
+     - 4.9
+     - 5.1
+   * - matplotlib-base
+     - 3.7
+     - 3.8
+   * - numba
+     - 0.57
+     - 0.60
+   * - numbagg
+     - 0.6
+     - 0.8
+   * - numpy
+     - 1.24
+     - 1.26
+   * - packaging
+     - 23.2
+     - 24.1
+   * - pandas
+     - 2.1
+     - 2.2
+   * - pint
+     - 0.22
+     - 0.24
+   * - pydap
+     - N/A
+     - 3.5
+   * - scipy
+     - 1.11
+     - 1.13
+   * - sparse
+     - 0.14
+     - 0.15
+   * - typing_extensions
+     - 4.8
+     - Removed
+   * - zarr
+     - 2.16
+     - 2.18
+
+Bug fixes
+~~~~~~~~~
+
+- Fix Pydap ``test_cmp_local_file`` for numpy 2.3.0 changes: (1) always return arrays for all versions and (2) skip ``astype(str)`` for numpy >= 2.3.0 for the expected data. (:pull:`10421`)
+  By `Kai Mühlbauer <https://github.com/kmuehlbauer>`_.
+- Fix the SciPy backend for netCDF3 files. (:issue:`8909`, :pull:`10376`)
+  By `Deepak Cherian <https://github.com/dcherian>`_.
+- Check and fix character array string dimension names, issue warnings as needed (:issue:`6352`, :pull:`10395`).
+  By `Kai Mühlbauer <https://github.com/kmuehlbauer>`_.
+- Fix the error message of :py:func:`testing.assert_equal` when two different :py:class:`DataTree` objects
+  are passed (:pull:`10440`). By `Mathias Hauser <https://github.com/mathause>`_.
+- Fix :py:func:`testing.assert_equal` with ``check_dim_order=False`` for :py:class:`DataTree` objects
+  (:pull:`10442`). By `Mathias Hauser <https://github.com/mathause>`_.
+- Fix Pydap backend testing. Tests now force string arrays to dtype ``"S"`` (pydap converts them to unicode type by default), removing the conditional on the numpy version. (:issue:`10261`, :pull:`10482`)
+  By `Miguel Jimenez-Urias <https://github.com/Mikejmnez>`_.
+- Fix attribute overwriting bug when decoding encoded
+  :py:class:`numpy.timedelta64` values from disk with a dtype attribute
+  (:issue:`10468`, :pull:`10469`). By `Spencer Clark
+  <https://github.com/spencerkclark>`_.
+- Fix default ``"_FillValue"`` dtype coercion bug when encoding
+  :py:class:`numpy.timedelta64` values to an on-disk format that only supports
+  32-bit integers (:issue:`10466`, :pull:`10469`). By `Spencer Clark
+  <https://github.com/spencerkclark>`_.
+
+Internal Changes
+~~~~~~~~~~~~~~~~
+
+- Forward variable name down to coders for AbstractWritableDataStore.encode_variable and subclasses. (:pull:`10395`).
+  By `Kai Mühlbauer <https://github.com/kmuehlbauer>`_.
+
+.. _whats-new.2025.06.1:
+
+v2025.06.1 (Jun 11, 2025)
+-------------------------
+
+This is a quick bugfix release to remove an unintended dependency on ``typing_extensions``.
+
+Thanks to the 4 contributors to this release:
+Alex Merose, Deepak Cherian, Ilan Gold and Simon Perkins
+
+Bug fixes
+~~~~~~~~~
+
+- Remove dependency on ``typing_extensions`` (:pull:`10413`). By `Simon Perkins <https://github.com/sjperkins>`_.
+
+.. _whats-new.2025.06.0:
+
+v2025.06.0 (Jun 10, 2025)
+-------------------------
+
+This release brings HTML reprs to the documentation, fixes to flexible Xarray indexes, performance optimizations, more ergonomic seasonal grouping and resampling
+with new :py:class:`~xarray.groupers.SeasonGrouper` and :py:class:`~xarray.groupers.SeasonResampler` objects, and bugfixes.
+
+Thanks to the 33 contributors to this release:
+Andrecho, Antoine Gibek, Benoit Bovy, Brian Michell, Christine P. Chai, David Huard, Davis Bennett, Deepak Cherian, Dimitri Papadopoulos Orfanos, Elliott Sales de Andrade, Erik, Erik Månsson, Giacomo Caria, Ilan Gold, Illviljan, Jesse Rusak, Jonathan Neuhauser, Justus Magin, Kai Mühlbauer, Kimoon Han, Konstantin Ntokas, Mark Harfouche, Michael Niklas, Nick Hodgskin, Niko Sirmpilatze, Pascal Bourgault, Scott Henderson, Simon Perkins, Spencer Clark, Tom Vo, Trevor James Smith, joseph nowak and micguerr-bopen
+
+New Features
+~~~~~~~~~~~~
+- Switch docs to jupyter-execute sphinx extension for HTML reprs. (:issue:`3893`, :pull:`10383`)
+  By `Scott Henderson <https://github.com/scottyhq>`_.
+- Allow an Xarray index that uses multiple dimensions to check equality with another
+  index for only a subset of those dimensions (i.e., ignoring the dimensions
+  that are excluded from alignment).
+  (:issue:`10243`, :pull:`10293`)
+  By `Benoit Bovy <https://github.com/benbovy>`_.
+- New :py:class:`~xarray.groupers.SeasonGrouper` and :py:class:`~xarray.groupers.SeasonResampler` objects for ergonomic seasonal aggregation.
+  See the docs on :ref:`seasonal_grouping` or `blog post <https://xarray.dev/blog/season-grouping>`_ for more.
+  By `Deepak Cherian <https://github.com/dcherian>`_.
+- Data corruption issues arising from misaligned Dask and Zarr chunks
+  can now be prevented using the new ``align_chunks`` parameter in
+  :py:meth:`~xarray.DataArray.to_zarr`. This option automatically rechunks
+  the Dask array to align it with the Zarr storage chunks. For now, it is
+  disabled by default, but this could change in the future.
+  (:issue:`9914`, :pull:`10336`)
+  By `Joseph Nowak <https://github.com/josephnowak>`_.
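+
+A rough sketch of the new parameter, assuming dask and zarr are installed; the
+Zarr chunk size set via ``encoding`` is purely illustrative:
+
+.. code:: python
+
+    import numpy as np
+    import xarray as xr
+
+    # Dask chunks of 3 do not line up with Zarr chunks of 4; align_chunks=True
+    # rechunks the Dask array so the write stays safe
+    ds = xr.Dataset({"a": ("x", np.arange(10))}).chunk({"x": 3})
+    ds.to_zarr(
+        "aligned.zarr",
+        mode="w",
+        align_chunks=True,
+        encoding={"a": {"chunks": (4,)}},
+    )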
+
+Documentation
+~~~~~~~~~~~~~
+- HTML reprs! By `Scott Henderson <https://github.com/scottyhq>`_.
+
+Bug fixes
+~~~~~~~~~
+- Fix :py:class:`~xarray.groupers.BinGrouper` when ``labels`` is not specified (:issue:`10284`).
+  By `Deepak Cherian <https://github.com/dcherian>`_.
+- Allow accessing arbitrary attributes on Pandas ExtensionArrays.
+  By `Deepak Cherian <https://github.com/dcherian>`_.
+- Fix coding empty (zero-size) timedelta64 arrays, ``units`` taking precedence when encoding,
+  fallback to default values when decoding (:issue:`10310`, :pull:`10313`).
+  By `Kai Mühlbauer <https://github.com/kmuehlbauer>`_.
+- Use the dtype of the intermediate sum, instead of the source dtype or ``int``, when casting
+  the count while calculating the mean in rolling operations (preserves float dtypes and gives
+  the correct mean of bool arrays) (:issue:`10340`, :pull:`10341`).
+  By `Kai Mühlbauer <https://github.com/kmuehlbauer>`_.
+- Improve the html ``repr`` of Xarray objects (dark mode, icons and variable attribute / data
+  dropdown sections).
+  (:pull:`10353`, :pull:`10354`)
+  By `Benoit Bovy <https://github.com/benbovy>`_.
+- Raise an error when attempting to encode :py:class:`numpy.datetime64` values
+  prior to the Gregorian calendar reform date of 1582-10-15 with a
+  ``"standard"`` or ``"gregorian"`` calendar. Previously we would warn and
+  encode these as :py:class:`cftime.DatetimeGregorian` objects, but it is not
+  clear that this is the user's intent, since this implicitly converts the
+  calendar of the datetimes from ``"proleptic_gregorian"`` to ``"gregorian"``
+  and prevents round-tripping them as :py:class:`numpy.datetime64` values
+  (:pull:`10352`). By `Spencer Clark <https://github.com/spencerkclark>`_.
+- Avoid unsafe casts from float to unsigned int in CFMaskCoder (:issue:`9815`, :pull:`9964`).
+  By `Elliott Sales de Andrade <https://github.com/QuLogic>`_.
+
+Performance
+~~~~~~~~~~~
+- Lazily indexed arrays now use less memory to store keys by avoiding copies
+  in :py:class:`~xarray.indexing.VectorizedIndexer` and :py:class:`~xarray.indexing.OuterIndexer`
+  (:issue:`10316`).
+  By `Jesse Rusak <https://github.com/jder>`_.
+- Fix performance regression in interp where more data was loaded than was necessary. (:issue:`10287`).
+  By `Deepak Cherian <https://github.com/dcherian>`_.
+- Speed up encoding of :py:class:`cftime.datetime` objects by roughly a factor
+  of three (:pull:`8324`). By `Antoine Gibek <https://github.com/antscloud>`_.
+
+.. _whats-new.2025.04.0:
+
+v2025.04.0 (Apr 29, 2025)
+-------------------------
+
+This release brings bug fixes, better support for extension arrays including returning a
+:py:class:`pandas.IntervalArray` from ``groupby_bins``, and performance improvements.
+Thanks to the 24 contributors to this release:
+Alban Farchi, Andrecho, Benoit Bovy, Deepak Cherian, Dimitri Papadopoulos Orfanos, Florian Jetter, Giacomo Caria, Ilan Gold, Illviljan, Joren Hammudoglu, Julia Signell, Kai Muehlbauer, Kai Mühlbauer, Mathias Hauser, Mattia Almansi, Michael Sumner, Miguel Jimenez, Nick Hodgskin (🦎 Vecko), Pascal Bourgault, Philip Chmielowiec, Scott Henderson, Spencer Clark, Stephan Hoyer and Tom Nicholas
+
+New Features
+~~~~~~~~~~~~
+- By default xarray now encodes :py:class:`numpy.timedelta64` values by
+  converting to :py:class:`numpy.int64` values and storing ``"dtype"`` and
+  ``"units"`` attributes consistent with the dtype of the in-memory
+  :py:class:`numpy.timedelta64` values, e.g. ``"timedelta64[s]"`` and
+  ``"seconds"`` for second-resolution timedeltas. These values will always be
+  decoded to timedeltas without a warning moving forward. Timedeltas encoded
+  via the previous approach can still be roundtripped exactly, but in the
+  future will not be decoded by default (:issue:`1621`, :issue:`10099`,
+  :pull:`10101`). By `Spencer Clark <https://github.com/spencerkclark>`_.
+
+- Added `scipy-stubs <https://github.com/scipy/scipy-stubs>`_ to the ``xarray[types]`` dependencies.
+  By `Joren Hammudoglu <https://github.com/jorenham>`_.
+- Added a :mod:`xarray.typing` module to expose selected public types for use in downstream libraries and static type checking.
+  (:issue:`10179`, :pull:`10215`).
+  By `Michele Guerreri <https://github.com/micguerr-bopen>`_.
+- Improved compatibility with the OPeNDAP DAP4 data model for the backend engine ``pydap``. This
+  includes ``datatree`` support and removal of slashes from dimension names. By
+  `Miguel Jimenez-Urias <https://github.com/Mikejmnez>`_.
+- Allow assigning index coordinates with non-array dimension(s) in a :py:class:`DataArray` by overriding
+  :py:meth:`Index.should_add_coord_to_array`. For example, this enables support for CF boundaries coordinate (e.g.,
+  ``time(time)`` and ``time_bnds(time, nbnd)``) in a DataArray (:pull:`10137`).
+  By `Benoit Bovy <https://github.com/benbovy>`_.
+- Improved support for pandas categorical extension arrays as indices (i.e., :py:class:`pandas.IntervalIndex`). (:issue:`9661`, :pull:`9671`)
+  By `Ilan Gold <https://github.com/ilan-gold>`_.
+- Improved checks and errors raised when trying to align objects with conflicting indexes.
+  It is now possible to align objects each with multiple indexes sharing common dimension(s).
+  (:issue:`7695`, :pull:`10251`)
+  By `Benoit Bovy <https://github.com/benbovy>`_.
+
+Breaking changes
+~~~~~~~~~~~~~~~~
+
+- The minimum versions of some dependencies were changed
+
+  ===================== =========  =======
+   Package                    Old      New
+  ===================== =========  =======
+    pydap                    3.4     3.5.0
+  ===================== =========  =======
+
+
+- Reductions with ``groupby_bins`` or those that involve :py:class:`xarray.groupers.BinGrouper`
+  now return objects indexed by :py:class:`pandas.IntervalArray` objects,
+  instead of numpy object arrays containing tuples. This change enables interval-aware indexing of
+  such Xarray objects. (:pull:`9671`). By `Ilan Gold <https://github.com/ilan-gold>`_.
+- Remove ``PandasExtensionArrayIndex`` from :py:attr:`xarray.Variable.data` when the attribute is a :py:class:`pandas.api.extensions.ExtensionArray` (:pull:`10263`). By `Ilan Gold <https://github.com/ilan-gold>`_.
+- The html and text ``repr`` for ``DataTree`` are now truncated. Up to 6 children are displayed
+  for each node -- the first 3 and the last 3 children -- with a ``...`` between them. The number
+  of children to include in the display is configurable via options. For instance use
+  ``set_options(display_max_children=8)`` to display 8 children rather than the default 6. (:pull:`10139`)
+  By `Julia Signell <https://github.com/jsignell>`_.
+
+
+Deprecations
+~~~~~~~~~~~~
+
+- The deprecation cycle for the ``eagerly_compute_group`` kwarg to ``groupby`` and ``groupby_bins``
+  is now complete.
+  By `Deepak Cherian <https://github.com/dcherian>`_.
+
+Bug fixes
+~~~~~~~~~
+
+- :py:meth:`~xarray.Dataset.to_stacked_array` now uses dimensions in order of appearance.
+  This fixes the issue where using :py:meth:`~xarray.Dataset.transpose` before :py:meth:`~xarray.Dataset.to_stacked_array`
+  had no effect. (Mentioned in :issue:`9921`)
+- Enable ``keep_attrs`` in ``DatasetView.map``, which is relevant for :py:func:`map_over_datasets` (:pull:`10219`)
+  By `Mathias Hauser <https://github.com/mathause>`_.
+- Variables with no temporal dimension are left untouched by :py:meth:`~xarray.Dataset.convert_calendar`. (:issue:`10266`,  :pull:`10268`)
+  By `Pascal Bourgault <https://github.com/aulemahal>`_.
+- Enable ``chunk_key_encoding`` in :py:meth:`~xarray.Dataset.to_zarr` for Zarr v2 Datasets (:pull:`10274`)
+  By `BrianMichell <https://github.com/BrianMichell>`_.
+
+Documentation
+~~~~~~~~~~~~~
+
+- Fix references to core classes in docs (:issue:`10195`, :pull:`10207`).
+  By `Mattia Almansi <https://github.com/malmans2>`_.
+- Fix references to point to updated pydap documentation (:pull:`10182`).
+  By `Miguel Jimenez-Urias <https://github.com/Mikejmnez>`_.
+- Switch to `pydata-sphinx-theme <https://github.com/pydata/pydata-sphinx-theme>`_ from `sphinx-book-theme <https://github.com/executablebooks/sphinx-book-theme>`_ (:pull:`8708`).
+  By `Scott Henderson <https://github.com/scottyhq>`_.
+
+- Add a dedicated 'Complex Numbers' section to the User Guide (:issue:`10213`, :pull:`10235`).
+  By `Andre Wendlinger <https://github.com/andrewendlinger>`_.
+
+Internal Changes
+~~~~~~~~~~~~~~~~
+- Avoid stacking when grouping by a chunked array. This can be a large performance improvement.
+  By `Deepak Cherian <https://github.com/dcherian>`_.
+- The implementation of ``Variable.set_dims`` has changed to use array indexing syntax
+  instead of ``np.broadcast_to`` to perform dimension expansions where
+  all new dimensions have a size of 1. This should improve compatibility with
+  duck arrays that do not support broadcasting (:issue:`9462`, :pull:`10277`).
+  By `Mark Harfouche <https://github.com/hmaarrfk>`_.
 
 .. _whats-new.2025.03.1:
 
@@ -25,9 +687,11 @@ Andrecho, Deepak Cherian, Ian Hunt-Isaak
 
 New Features
 ~~~~~~~~~~~~
-
 - Allow setting a ``fill_value`` for Zarr format 3 arrays. Specify ``fill_value`` in ``encoding`` as usual.
   (:issue:`10064`). By `Deepak Cherian <https://github.com/dcherian>`_.
+- Added :py:class:`indexes.RangeIndex` as an alternative, memory saving Xarray index representing
+  a 1-dimensional bounded interval with evenly spaced floating values (:issue:`8473`, :pull:`10076`).
+  By `Benoit Bovy <https://github.com/benbovy>`_.
 
 Breaking changes
 ~~~~~~~~~~~~~~~~
@@ -200,7 +864,7 @@ error messages have been removed or rewr
 non-nanosecond datetimes (with ``'us'``, ``'ms'`` or ``'s'`` resolution) when
 creating DataArray's from scratch, picking the lowest possible resolution:
 
-.. ipython:: python
+.. code:: python
 
     xr.DataArray(data=[np.datetime64("2000-01-01", "D")], dims=("time",))
 
@@ -450,7 +1114,7 @@ Documentation
 
 Internal Changes
 ~~~~~~~~~~~~~~~~
-- ``persist`` methods now route through the :py:class:`xr.core.parallelcompat.ChunkManagerEntrypoint` (:pull:`9682`).
+- ``persist`` methods now route through the :py:class:`xr.namedarray.parallelcompat.ChunkManagerEntrypoint` (:pull:`9682`).
   By `Sam Levang <https://github.com/slevang>`_.
 
 .. _whats-new.2024.10.0:
@@ -687,7 +1351,7 @@ Bug fixes
 - Promote floating-point numeric datetimes before decoding (:issue:`9179`, :pull:`9182`).
   By `Justus Magin <https://github.com/keewis>`_.
 - Address regression introduced in :pull:`9002` that prevented objects returned
-  by py:meth:`DataArray.convert_calendar` to be indexed by a time index in
+  by :py:meth:`DataArray.convert_calendar` to be indexed by a time index in
   certain circumstances (:issue:`9138`, :pull:`9192`).
   By `Mark Harfouche <https://github.com/hmaarrfk>`_ and `Spencer Clark <https://github.com/spencerkclark>`_.
 - Fix static typing of tolerance arguments by allowing ``str`` type (:issue:`8892`, :pull:`9194`).
@@ -781,7 +1445,7 @@ New Features
   for example, will retain the object.  However, one cannot do operations that are not possible on the ``ExtensionArray``
   then, such as broadcasting. (:issue:`5287`, :issue:`8463`, :pull:`8723`)
   By `Ilan Gold <https://github.com/ilan-gold>`_.
-- :py:func:`testing.assert_allclose`/:py:func:`testing.assert_equal` now accept a new argument ``check_dims="transpose"``, controlling whether a transposed array is considered equal. (:issue:`5733`, :pull:`8991`)
+- :py:func:`testing.assert_allclose` / :py:func:`testing.assert_equal` now accept a new argument ``check_dims="transpose"``, controlling whether a transposed array is considered equal. (:issue:`5733`, :pull:`8991`)
   By `Ignacio Martinez Vazquez <https://github.com/ignamv>`_.
 - Added the option to avoid automatically creating 1D pandas indexes in :py:meth:`Dataset.expand_dims()`, by passing the new kwarg
   ``create_index_for_new_dim=False``. (:pull:`8960`)
@@ -1661,7 +2325,7 @@ Documentation
 - Added page on the internal design of xarray objects.
   (:pull:`7991`) By `Tom Nicholas <https://github.com/TomNicholas>`_.
 - Added examples to docstrings of :py:meth:`Dataset.assign_attrs`, :py:meth:`Dataset.broadcast_equals`,
-  :py:meth:`Dataset.equals`, :py:meth:`Dataset.identical`, :py:meth:`Dataset.expand_dims`,:py:meth:`Dataset.drop_vars`
+  :py:meth:`Dataset.equals`, :py:meth:`Dataset.identical`, :py:meth:`Dataset.expand_dims`, :py:meth:`Dataset.drop_vars`
   (:issue:`6793`, :pull:`7937`) By `Harshitha <https://github.com/harshitha1201>`_.
 - Add docstrings for the :py:class:`Index` base class and add some documentation on how to
   create custom, Xarray-compatible indexes (:pull:`6975`)
@@ -1706,7 +2370,7 @@ Documentation
 ~~~~~~~~~~~~~
 
 - Added examples to docstrings of :py:meth:`Dataset.assign_attrs`, :py:meth:`Dataset.broadcast_equals`,
-  :py:meth:`Dataset.equals`, :py:meth:`Dataset.identical`, :py:meth:`Dataset.expand_dims`,:py:meth:`Dataset.drop_vars`
+  :py:meth:`Dataset.equals`, :py:meth:`Dataset.identical`, :py:meth:`Dataset.expand_dims`, :py:meth:`Dataset.drop_vars`
   (:issue:`6793`, :pull:`7937`) By `Harshitha <https://github.com/harshitha1201>`_.
 - Added page on wrapping chunked numpy-like arrays as alternatives to dask arrays.
   (:pull:`7951`) By `Tom Nicholas <https://github.com/TomNicholas>`_.
@@ -1838,7 +2502,7 @@ Bug fixes
 Internal Changes
 ~~~~~~~~~~~~~~~~
 - Experimental support for wrapping chunked array libraries other than dask.
-  A new ABC is defined - :py:class:`xr.core.parallelcompat.ChunkManagerEntrypoint` - which can be subclassed and then
+  A new ABC is defined - :py:class:`xr.namedarray.parallelcompat.ChunkManagerEntrypoint` - which can be subclassed and then
   registered by alternative chunked array implementations. (:issue:`6807`, :pull:`7019`)
   By `Tom Nicholas <https://github.com/TomNicholas>`_.
 
@@ -3810,11 +4474,11 @@ Documentation
 - Raise a more informative error when :py:meth:`DataArray.to_dataframe` is
   is called on a scalar, (:issue:`4228`);
   By `Pieter Gijsbers <https://github.com/pgijsbers>`_.
-- Fix grammar and typos in the :doc:`contributing` guide (:pull:`4545`).
+- Fix grammar and typos in the :ref:`contributing` guide (:pull:`4545`).
   By `Sahid Velji <https://github.com/sahidvelji>`_.
 - Fix grammar and typos in the :doc:`user-guide/io` guide (:pull:`4553`).
   By `Sahid Velji <https://github.com/sahidvelji>`_.
-- Update link to NumPy docstring standard in the :doc:`contributing` guide (:pull:`4558`).
+- Update link to NumPy docstring standard in the :ref:`contributing` guide (:pull:`4558`).
   By `Sahid Velji <https://github.com/sahidvelji>`_.
 - Add docstrings to ``isnull`` and ``notnull``, and fix the displayed signature
   (:issue:`2760`, :pull:`4618`).
@@ -4251,7 +4915,7 @@ New Features
 ~~~~~~~~~~~~
 
 - Weighted array reductions are now supported via the new :py:meth:`DataArray.weighted`
-  and :py:meth:`Dataset.weighted` methods. See :ref:`comput.weighted`. (:issue:`422`, :pull:`2922`).
+  and :py:meth:`Dataset.weighted` methods. See :ref:`compute.weighted`. (:issue:`422`, :pull:`2922`).
   By `Mathias Hauser <https://github.com/mathause>`_.
 - The new jupyter notebook repr (``Dataset._repr_html_`` and
   ``DataArray._repr_html_``) (introduced in 0.14.1) is now on by default. To
@@ -6070,7 +6734,7 @@ Enhancements
   (:issue:`1617`). This enables using NumPy ufuncs directly on
   ``xarray.Dataset`` objects with recent versions of NumPy (v1.13 and newer):
 
-  .. ipython:: python
+  .. code:: python
 
       ds = xr.Dataset({"a": 1})
       np.sin(ds)
@@ -6121,7 +6785,7 @@ Documentation
 - Added apply_ufunc example to :ref:`/examples/weather-data.ipynb#Toy-weather-data` (:issue:`1844`).
   By `Liam Brannigan <https://github.com/braaannigan>`_.
 - New entry ``Why don’t aggregations return Python scalars?`` in the
-  :doc:`getting-started-guide/faq` (:issue:`1726`).
+  :ref:`faq` (:issue:`1726`).
   By `0x0L <https://github.com/0x0L>`_.
 
 Enhancements
@@ -6162,7 +6826,7 @@ Enhancements
 
 - Reduce methods such as :py:func:`DataArray.sum()` now handles object-type array.
 
-  .. ipython:: python
+  .. code:: python
 
       da = xr.DataArray(np.array([True, False, np.nan], dtype=object), dims="x")
       da.sum()
@@ -6199,7 +6863,7 @@ Enhancements
 
 .. _Zarr: http://zarr.readthedocs.io/
 
-.. _Iris: http://scitools.org.uk/iris
+.. _Iris: http://scitools-iris.readthedocs.io
 
 .. _netcdftime: https://unidata.github.io/netcdftime
 
@@ -6316,23 +6980,15 @@ Breaking changes
 
   Old syntax:
 
-  .. ipython::
-    :verbatim:
+  .. jupyter-input::
 
-    In [1]: ds.resample("24H", dim="time", how="max")
-    Out[1]:
-    <xarray.Dataset>
-    [...]
+    ds.resample("24H", dim="time", how="max")
 
   New syntax:
 
-  .. ipython::
-    :verbatim:
+  .. jupyter-input::
 
-    In [1]: ds.resample(time="24H").max()
-    Out[1]:
-    <xarray.Dataset>
-    [...]
+    ds.resample(time="24H").max()
 
   Note that both versions are currently supported, but using the old syntax will
   produce a warning encouraging users to adopt the new syntax.
@@ -6382,7 +7038,7 @@ Enhancements
 - New helper function :py:func:`~xarray.apply_ufunc` for wrapping functions
   written to work on NumPy arrays to support labels on xarray objects
   (:issue:`770`). ``apply_ufunc`` also support automatic parallelization for
-  many functions with dask. See :ref:`comput.wrapping-custom` and
+  many functions with dask. See :ref:`compute.wrapping-custom` and
   :ref:`dask.automatic-parallelization` for details.
   By `Stephan Hoyer <https://github.com/shoyer>`_.
 
@@ -6394,21 +7050,25 @@ Enhancements
 - New function :py:func:`~xarray.where` for conditionally switching between
   values in xarray objects, like :py:func:`numpy.where`:
 
-  .. ipython::
-    :verbatim:
 
-    In [1]: import xarray as xr
+  .. jupyter-input::
 
-    In [2]: arr = xr.DataArray([[1, 2, 3], [4, 5, 6]], dims=("x", "y"))
+    import xarray as xr
+
+    arr = xr.DataArray([[1, 2, 3], [4, 5, 6]], dims=("x", "y"))
+
+    xr.where(arr % 2, "even", "odd")
+
+
+  .. jupyter-output::
 
-    In [3]: xr.where(arr % 2, "even", "odd")
-    Out[3]:
     <xarray.DataArray (x: 2, y: 3)>
     array([['even', 'odd', 'even'],
            ['odd', 'even', 'odd']],
           dtype='<U4')
     Dimensions without coordinates: x, y
 
+
   Equivalently, the :py:meth:`~xarray.Dataset.where` method also now supports
   the ``other`` argument, for filling with a value other than ``NaN``
   (:issue:`576`).
@@ -6450,19 +7110,16 @@ Enhancements
   ``xarray.to_netcdf``, and :py:func:`~xarray.save_mfdataset`
   (:issue:`799`):
 
-  .. ipython::
-    :verbatim:
+  .. jupyter-input::
 
-    In [2]: from pathlib import Path  # In Python 2, use pathlib2!
+    from pathlib import Path  # In Python 2, use pathlib2!
 
-    In [3]: data_dir = Path("data/")
+    data_dir = Path("data/")
 
-    In [4]: one_file = data_dir / "dta_for_month_01.nc"
+    one_file = data_dir / "dta_for_month_01.nc"
+
+    xr.open_dataset(one_file)
 
-    In [5]: xr.open_dataset(one_file)
-    Out[5]:
-    <xarray.Dataset>
-    [...]
 
   By `Willi Rath <https://github.com/willirath>`_.
 
@@ -6919,17 +7576,19 @@ Breaking changes
   by their appearance in list of "Dimensions without coordinates" in the
   ``Dataset`` or ``DataArray`` repr:
 
-  .. ipython::
-    :verbatim:
+  .. jupyter-input::
+
+    xr.Dataset({"foo": (("x", "y"), [[1, 2]])})
+
+  .. jupyter-output::
 
-    In [1]: xr.Dataset({"foo": (("x", "y"), [[1, 2]])})
-    Out[1]:
     <xarray.Dataset>
     Dimensions:  (x: 1, y: 2)
     Dimensions without coordinates: x, y
     Data variables:
         foo      (x, y) int64 1 2
 
+
   This has a number of implications:
 
   - :py:func:`~align` and :py:meth:`~Dataset.reindex` can now error, if
@@ -7376,16 +8035,16 @@ Enhancements
 - Rolling window operations on DataArray objects are now supported via a new
   :py:meth:`DataArray.rolling` method. For example:
 
-  .. ipython::
-    :verbatim:
+  .. jupyter-input::
 
-    In [1]: import xarray as xr
-       ...: import numpy as np
+    import xarray as xr
+    import numpy as np
 
-    In [2]: arr = xr.DataArray(np.arange(0, 7.5, 0.5).reshape(3, 5), dims=("x", "y"))
+    arr = xr.DataArray(np.arange(0, 7.5, 0.5).reshape(3, 5), dims=("x", "y"))
+    arr
+
+  .. jupyter-output::
 
-    In [3]: arr
-    Out[3]:
     <xarray.DataArray (x: 3, y: 5)>
     array([[ 0. ,  0.5,  1. ,  1.5,  2. ],
            [ 2.5,  3. ,  3.5,  4. ,  4.5],
@@ -7394,8 +8053,12 @@ Enhancements
       * x        (x) int64 0 1 2
       * y        (y) int64 0 1 2 3 4
 
-    In [4]: arr.rolling(y=3, min_periods=2).mean()
-    Out[4]:
+  .. jupyter-input::
+
+    arr.rolling(y=3, min_periods=2).mean()
+
+  .. jupyter-output::
+
     <xarray.DataArray (x: 3, y: 5)>
     array([[  nan,  0.25,  0.5 ,  1.  ,  1.5 ],
            [  nan,  2.75,  3.  ,  3.5 ,  4.  ],
@@ -7404,7 +8067,7 @@ Enhancements
       * x        (x) int64 0 1 2
       * y        (y) int64 0 1 2 3 4
 
-  See :ref:`comput.rolling` for more details. By
+  See :ref:`compute.rolling` for more details. By
   `Joe Hamman <https://github.com/jhamman>`_.
 
 Bug fixes
@@ -7518,11 +8181,12 @@ Breaking changes
   corresponding coordinate. You will now need to provide coordinate labels
   explicitly. Here's the old behavior:
 
-  .. ipython::
-    :verbatim:
+  .. jupyter-input::
+
+    xray.DataArray([4, 5, 6], dims="x", name="x")
+
+  .. jupyter-output::
 
-    In [2]: xray.DataArray([4, 5, 6], dims="x", name="x")
-    Out[2]:
     <xray.DataArray 'x' (x: 3)>
     array([4, 5, 6])
     Coordinates:
@@ -7530,11 +8194,12 @@ Breaking changes
 
   and the new behavior (compare the values of the ``x`` coordinate):
 
-  .. ipython::
-    :verbatim:
+  .. jupyter-input::
+
+    xray.DataArray([4, 5, 6], dims="x", name="x")
+
+  .. jupyter-output::
 
-    In [2]: xray.DataArray([4, 5, 6], dims="x", name="x")
-    Out[2]:
     <xray.DataArray 'x' (x: 3)>
     array([4, 5, 6])
     Coordinates:
@@ -7553,30 +8218,39 @@ Enhancements
 - Basic support for :py:class:`~pandas.MultiIndex` coordinates on xray objects, including
   indexing, :py:meth:`~DataArray.stack` and :py:meth:`~DataArray.unstack`:
 
-  .. ipython::
-    :verbatim:
+  .. jupyter-input::
+
+    df = pd.DataFrame({"foo": range(3), "x": ["a", "b", "b"], "y": [0, 0, 1]})
+
+    s = df.set_index(["x", "y"])["foo"]
 
-    In [7]: df = pd.DataFrame({"foo": range(3), "x": ["a", "b", "b"], "y": [0, 0, 1]})
+    arr = xray.DataArray(s, dims="z")
 
-    In [8]: s = df.set_index(["x", "y"])["foo"]
+    arr
 
-    In [12]: arr = xray.DataArray(s, dims="z")
+  .. jupyter-output::
 
-    In [13]: arr
-    Out[13]:
     <xray.DataArray 'foo' (z: 3)>
     array([0, 1, 2])
     Coordinates:
       * z        (z) object ('a', 0) ('b', 0) ('b', 1)
 
-    In [19]: arr.indexes["z"]
-    Out[19]:
+  .. jupyter-input::
+
+    arr.indexes["z"]
+
+  .. jupyter-output::
+
     MultiIndex(levels=[[u'a', u'b'], [0, 1]],
                labels=[[0, 1, 1], [0, 0, 1]],
                names=[u'x', u'y'])
 
-    In [14]: arr.unstack("z")
-    Out[14]:
+  .. jupyter-input::
+
+    arr.unstack("z")
+
+  .. jupyter-output::
+
     <xray.DataArray 'foo' (x: 2, y: 2)>
     array([[  0.,  nan],
            [  1.,   2.]])
@@ -7584,8 +8258,12 @@ Enhancements
       * x        (x) object 'a' 'b'
       * y        (y) int64 0 1
 
-    In [26]: arr.unstack("z").stack(z=("x", "y"))
-    Out[26]:
+  .. jupyter-input::
+
+    arr.unstack("z").stack(z=("x", "y"))
+
+  .. jupyter-output::
+
     <xray.DataArray 'foo' (z: 4)>
     array([  0.,  nan,   1.,   2.])
     Coordinates:
@@ -7610,8 +8288,7 @@ Enhancements
 - New ``xray.Dataset.shift`` and ``xray.Dataset.roll`` methods
   for shifting/rotating datasets or arrays along a dimension:
 
-  .. ipython:: python
-      :okwarning:
+  .. code:: python
 
       array = xray.DataArray([5, 6, 7, 8], dims="x")
       array.shift(x=2)
@@ -7626,7 +8303,7 @@ Enhancements
 - New function ``xray.broadcast`` for explicitly broadcasting
   ``DataArray`` and ``Dataset`` objects against each other. For example:
 
-  .. ipython:: python
+  .. code:: python
 
       a = xray.DataArray([1, 2, 3], dims="x")
       b = xray.DataArray([5, 6], dims="y")
@@ -7696,13 +8373,14 @@ Enhancements
   the ``tolerance`` argument for controlling nearest-neighbor selection
   (:issue:`629`):
 
-  .. ipython::
-    :verbatim:
+  .. jupyter-input::
+
+    array = xray.DataArray([1, 2, 3], dims="x")
 
-    In [5]: array = xray.DataArray([1, 2, 3], dims="x")
+    array.reindex(x=[0.9, 1.5], method="nearest", tolerance=0.2)
+
+  .. jupyter-output::
 
-    In [6]: array.reindex(x=[0.9, 1.5], method="nearest", tolerance=0.2)
-    Out[6]:
     <xray.DataArray (x: 2)>
     array([  2.,  nan])
     Coordinates:
@@ -7778,17 +8456,18 @@ Enhancements
 - Added ``xray.Dataset.isel_points`` and ``xray.Dataset.sel_points``
   to support pointwise indexing of Datasets and DataArrays (:issue:`475`).
 
-  .. ipython::
-    :verbatim:
+  .. jupyter-input::
 
-    In [1]: da = xray.DataArray(
-       ...:     np.arange(56).reshape((7, 8)),
-       ...:     coords={"x": list("abcdefg"), "y": 10 * np.arange(8)},
-       ...:     dims=["x", "y"],
-       ...: )
+    da = xray.DataArray(
+        np.arange(56).reshape((7, 8)),
+        coords={"x": list("abcdefg"), "y": 10 * np.arange(8)},
+        dims=["x", "y"],
+    )
 
-    In [2]: da
-    Out[2]:
+    da
+
+  .. jupyter-output::
+
     <xray.DataArray (x: 7, y: 8)>
     array([[ 0,  1,  2,  3,  4,  5,  6,  7],
            [ 8,  9, 10, 11, 12, 13, 14, 15],
@@ -7801,9 +8480,13 @@ Enhancements
     * y        (y) int64 0 10 20 30 40 50 60 70
     * x        (x) |S1 'a' 'b' 'c' 'd' 'e' 'f' 'g'
 
+  .. jupyter-input::
+
     # we can index by position along each dimension
-    In [3]: da.isel_points(x=[0, 1, 6], y=[0, 1, 0], dim="points")
-    Out[3]:
+    da.isel_points(x=[0, 1, 6], y=[0, 1, 0], dim="points")
+
+  .. jupyter-output::
+
     <xray.DataArray (points: 3)>
     array([ 0,  9, 48])
     Coordinates:
@@ -7811,9 +8494,13 @@ Enhancements
         x        (points) |S1 'a' 'b' 'g'
       * points   (points) int64 0 1 2
 
+  .. jupyter-input::
+
     # or equivalently by label
-    In [9]: da.sel_points(x=["a", "b", "g"], y=[0, 10, 0], dim="points")
-    Out[9]:
+    da.sel_points(x=["a", "b", "g"], y=[0, 10, 0], dim="points")
+
+  .. jupyter-output::
+
     <xray.DataArray (points: 3)>
     array([ 0,  9, 48])
     Coordinates:
@@ -7824,12 +8511,10 @@ Enhancements
 - New ``xray.Dataset.where`` method for masking xray objects according
   to some criteria. This works particularly well with multi-dimensional data:
 
-  .. ipython:: python
+  .. code:: python
 
       ds = xray.Dataset(coords={"x": range(100), "y": range(100)})
       ds["distance"] = np.sqrt(ds.x**2 + ds.y**2)
-
-      @savefig where_example.png width=4in height=4in
       ds.distance.where(ds.distance < 100).plot()
 
 - Added new methods ``xray.DataArray.diff`` and ``xray.Dataset.diff``
@@ -7838,7 +8523,7 @@ Enhancements
 - New ``xray.DataArray.to_masked_array`` convenience method for
   returning a numpy.ma.MaskedArray.
 
-  .. ipython:: python
+  .. code:: python
 
       da = xray.DataArray(np.random.random_sample(size=(5, 4)))
       da.where(da < 0.5)
@@ -7897,14 +8582,13 @@ Enhancements
   with dask.array. For example, to save a dataset too big to fit into memory
   to one file per year, we could write:
 
-  .. ipython::
-    :verbatim:
+  .. jupyter-input::
 
-    In [1]: years, datasets = zip(*ds.groupby("time.year"))
+    years, datasets = zip(*ds.groupby("time.year"))
 
-    In [2]: paths = ["%s.nc" % y for y in years]
+    paths = ["%s.nc" % y for y in years]
 
-    In [3]: xray.save_mfdataset(datasets, paths)
+    xray.save_mfdataset(datasets, paths)
 
 Bug fixes
 ~~~~~~~~~
@@ -7972,13 +8656,14 @@ Backwards incompatible changes
   surprising behavior, where the behavior of groupby and concat operations
   could depend on runtime values (:issue:`268`). For example:
 
-  .. ipython::
-    :verbatim:
+  .. jupyter-input::
 
-    In [1]: ds = xray.Dataset({"x": 0})
+    ds = xray.Dataset({"x": 0})
+
+    xray.concat([ds, ds], dim="y")
+
+  .. jupyter-output::
 
-    In [2]: xray.concat([ds, ds], dim="y")
-    Out[2]:
     <xray.Dataset>
     Dimensions:  ()
     Coordinates:
@@ -7988,12 +8673,18 @@ Backwards incompatible changes
 
   Now, the default always concatenates data variables:
 
-  .. ipython:: python
-      :suppress:
+  .. code:: python
 
-      ds = xray.Dataset({"x": 0})
+    In [1]: ds = xray.Dataset({"x": 0})
 
-  .. ipython:: python
+    In [2]: xray.concat([ds, ds], dim="y")
+    Out[2]:
+    <xarray.Dataset> Size: 16B
+    Dimensions:  (y: 2)
+    Dimensions without coordinates: y
+    Data variables:
+        x        (y) int64 16B 0 0
+
+  .. code:: python
 
       xray.concat([ds, ds], dim="y")
 
@@ -8006,7 +8697,7 @@ Enhancements
   ``xray.DataArray.to_dataset`` methods make it easy to switch back
   and forth between arrays and datasets:
 
-  .. ipython:: python
+  .. code:: python
 
       ds = xray.Dataset(
           {"a": 1, "b": ("x", [1, 2, 3])},
@@ -8019,7 +8710,7 @@ Enhancements
 - New ``xray.Dataset.fillna`` method to fill missing values, modeled
   off the pandas method of the same name:
 
-  .. ipython:: python
+  .. code:: python
 
       array = xray.DataArray([np.nan, 1, np.nan, 3], dims="x")
       array.fillna(0)
@@ -8032,7 +8723,7 @@ Enhancements
   methods patterned off the new :py:meth:`DataFrame.assign <pandas.DataFrame.assign>`
   method in pandas:
 
-  .. ipython:: python
+  .. code:: python
 
       ds = xray.Dataset({"y": ("x", [1, 2, 3])})
       ds.assign(z=lambda ds: ds.y**2)
@@ -8046,11 +8737,12 @@ Enhancements
 
   .. use verbatim because I can't seem to install pandas 0.16.1 on RTD :(
 
-  .. ipython::
-      :verbatim:
+  .. jupyter-input::
+
+      ds.sel(x=1.1, method="nearest")
+
+  .. jupyter-output::
 
-      In [12]: ds.sel(x=1.1, method="nearest")
-      Out[12]:
       <xray.Dataset>
       Dimensions:  ()
       Coordinates:
@@ -8058,8 +8750,12 @@ Enhancements
       Data variables:
           y        int64 2
 
-      In [13]: ds.sel(x=[1.1, 2.1], method="pad")
-      Out[13]:
+  .. jupyter-input::
+
+      ds.sel(x=[1.1, 2.1], method="pad")
+
+  .. jupyter-output::
+
       <xray.Dataset>
       Dimensions:  (x: 2)
       Coordinates:
@@ -8082,7 +8778,7 @@ Enhancements
   It can be used either as a context manager, in which case the default is restored
   outside the context:
 
-  .. ipython:: python
+  .. code:: python
 
       ds = xray.Dataset({"x": np.arange(1000)})
       with xray.set_options(display_width=40):
@@ -8090,10 +8786,9 @@ Enhancements
 
   Or to set a global option:
 
-  .. ipython::
-      :verbatim:
+  .. jupyter-input::
 
-      In [1]: xray.set_options(display_width=80)
+      xray.set_options(display_width=80)
 
   The default value for the ``display_width`` option is 80.
 
@@ -8121,8 +8816,7 @@ Enhancements
   a new temporal resolution. The syntax is the `same as pandas`_, except you
   need to supply the time dimension explicitly:
 
-  .. ipython:: python
-      :verbatim:
+  .. code:: python
 
       time = pd.date_range("2000-01-01", freq="6H", periods=10)
       array = xray.DataArray(np.arange(10), [("time", time)])
@@ -8131,31 +8825,27 @@ Enhancements
   You can specify how to do the resampling with the ``how`` argument and other
   options such as ``closed`` and ``label`` let you control labeling:
 
-  .. ipython:: python
-      :verbatim:
+  .. code:: python
 
       array.resample("1D", dim="time", how="sum", label="right")
 
   If the desired temporal resolution is higher than the original data
   (upsampling), xray will insert missing values:
 
-  .. ipython:: python
-      :verbatim:
+  .. code:: python
 
       array.resample("3H", "time")
 
 - ``first`` and ``last`` methods on groupby objects let you take the first or
   last examples from each group along the grouped axis:
 
-  .. ipython:: python
-      :verbatim:
+  .. code:: python
 
       array.groupby("time.day").first()
 
   These methods combine well with ``resample``:
 
-  .. ipython:: python
-      :verbatim:
+  .. code:: python
 
       array.resample("1D", dim="time", how="first")
 
@@ -8163,10 +8853,9 @@ Enhancements
 - ``xray.Dataset.swap_dims`` allows for easily swapping one dimension
   out for another:
 
-  .. ipython:: python
+  .. code:: python
 
       ds = xray.Dataset({"x": range(3), "y": ("x", list("abc"))})
-      ds
       ds.swap_dims({"x": "y"})
 
   This was possible in earlier versions of xray, but required some contortions.
@@ -8211,7 +8900,7 @@ Breaking changes
   :ref:`For arithmetic<math automatic alignment>`, we align
   based on the **intersection** of labels:
 
-  .. ipython:: python
+  .. code:: python
 
       lhs = xray.DataArray([1, 2, 3], [("x", [0, 1, 2])])
       rhs = xray.DataArray([2, 3, 4], [("x", [1, 2, 3])])
@@ -8220,21 +8909,21 @@ Breaking changes
   :ref:`For dataset construction and merging<merge>`, we align based on the
   **union** of labels:
 
-  .. ipython:: python
+  .. code:: python
 
       xray.Dataset({"foo": lhs, "bar": rhs})
 
   :ref:`For update and __setitem__<update>`, we align based on the **original**
   object:
 
-  .. ipython:: python
+  .. code:: python
 
       lhs.coords["rhs"] = rhs
       lhs
 
 - Aggregations like ``mean`` or ``median`` now skip missing values by default:
 
-  .. ipython:: python
+  .. code:: python
 
       xray.DataArray([1, 2, np.nan, 3]).mean()
 
@@ -8250,7 +8939,7 @@ Breaking changes
   persists through arithmetic, even though it has different shapes on each
   DataArray:
 
-  .. ipython:: python
+  .. code:: python
 
       a = xray.DataArray([1, 2], coords={"c": 0}, dims="x")
       b = xray.DataArray([1, 2], coords={"c": ("x", [0, 0])}, dims="x")
@@ -8262,7 +8951,7 @@ Breaking changes
   the name ``'month'``, not ``'time.month'`` (:issue:`345`). This makes it
   easier to index the resulting arrays when they are used with ``groupby``:
 
-  .. ipython:: python
+  .. code:: python
 
       time = xray.DataArray(
           pd.date_range("2000-01-01", periods=365), dims="time", name="time"
@@ -8305,7 +8994,7 @@ Enhancements
 - Support for ``xray.Dataset.reindex`` with a fill method. This
   provides a useful shortcut for upsampling:
 
-  .. ipython:: python
+  .. code:: python
 
       data = xray.DataArray([1, 2, 3], [("x", range(3))])
       data.reindex(x=[0.5, 1, 1.5, 2, 2.5], method="pad")
@@ -8326,8 +9015,7 @@ Enhancements
 - The new ``xray.Dataset.drop`` and ``xray.DataArray.drop`` methods
   makes it easy to drop explicitly listed variables or index labels:
 
-  .. ipython:: python
-      :okwarning:
+  .. code:: python
 
       # drop variables
       ds = xray.Dataset({"x": 0, "y": 1})
@@ -8400,7 +9088,7 @@ Backwards incompatible changes
   ``datetime64[ns]`` arrays when stored in an xray object, using machinery
   borrowed from pandas:
 
-  .. ipython:: python
+  .. code:: python
 
       from datetime import datetime
 
@@ -8418,7 +9106,7 @@ Enhancements
 - Due to popular demand, we have added experimental attribute style access as
   a shortcut for dataset variables, coordinates and attributes:
 
-  .. ipython:: python
+  .. code:: python
 
       ds = xray.Dataset({"tmin": ([], 25, {"units": "celsius"})})
       ds.tmin.units
@@ -8429,7 +9117,7 @@ Enhancements
 - You can now use a dictionary for indexing with labeled dimensions. This
   provides a safe way to do assignment with labeled dimensions:
 
-  .. ipython:: python
+  .. code:: python
 
       array = xray.DataArray(np.zeros(5), dims=["x"])
       array[dict(x=slice(3))] = 1
diff -pruN 2025.03.1-8/properties/test_index_manipulation.py 2025.10.1-1/properties/test_index_manipulation.py
--- 2025.03.1-8/properties/test_index_manipulation.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/properties/test_index_manipulation.py	2025-10-10 10:38:05.000000000 +0000
@@ -261,8 +261,6 @@ DatasetTest = DatasetStateMachine.TestCa
 
 @pytest.mark.skip(reason="failure detected by hypothesis")
 def test_unstack_object():
-    import xarray as xr
-
     ds = xr.Dataset()
     ds["0"] = np.array(["", "\x000"], dtype=object)
     ds.stack({"1": ["0"]}).unstack()
@@ -270,8 +268,6 @@ def test_unstack_object():
 
 @pytest.mark.skip(reason="failure detected by hypothesis")
 def test_unstack_timedelta_index():
-    import xarray as xr
-
     ds = xr.Dataset()
     ds["0"] = np.array([0, 1, 2, 3], dtype="timedelta64[ns]")
     ds.stack({"1": ["0"]}).unstack()
diff -pruN 2025.03.1-8/properties/test_pandas_roundtrip.py 2025.10.1-1/properties/test_pandas_roundtrip.py
--- 2025.03.1-8/properties/test_pandas_roundtrip.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/properties/test_pandas_roundtrip.py	2025-10-10 10:38:05.000000000 +0000
@@ -15,6 +15,7 @@ import hypothesis.extra.numpy as npst  #
 import hypothesis.extra.pandas as pdst  # isort:skip
 import hypothesis.strategies as st  # isort:skip
 from hypothesis import given  # isort:skip
+from xarray.tests import has_pyarrow
 
 numeric_dtypes = st.one_of(
     npst.unsigned_integer_dtypes(endianness="="),
@@ -134,10 +135,39 @@ def test_roundtrip_pandas_dataframe_date
     xr.testing.assert_identical(dataset, roundtripped.to_xarray())
 
 
-def test_roundtrip_1d_pandas_extension_array() -> None:
-    df = pd.DataFrame({"cat": pd.Categorical(["a", "b", "c"])})
-    arr = xr.Dataset.from_dataframe(df)["cat"]
+@pytest.mark.parametrize(
+    "extension_array",
+    [
+        pd.Categorical(["a", "b", "c"]),
+        pd.array(["a", "b", "c"], dtype="string"),
+        pd.arrays.IntervalArray(
+            [pd.Interval(0, 1), pd.Interval(1, 5), pd.Interval(2, 6)]
+        ),
+        pd.arrays.TimedeltaArray._from_sequence(pd.TimedeltaIndex(["1h", "2h", "3h"])),
+        pd.arrays.DatetimeArray._from_sequence(
+            pd.DatetimeIndex(["2023-01-01", "2023-01-02", "2023-01-03"], freq="D")
+        ),
+        np.array([1, 2, 3], dtype="int64"),
+    ]
+    + ([pd.array([1, 2, 3], dtype="int64[pyarrow]")] if has_pyarrow else []),
+    ids=["cat", "string", "interval", "timedelta", "datetime", "numpy"]
+    + (["pyarrow"] if has_pyarrow else []),
+)
+@pytest.mark.parametrize("is_index", [True, False])
+def test_roundtrip_1d_pandas_extension_array(extension_array, is_index) -> None:
+    df = pd.DataFrame({"arr": extension_array})
+    if is_index:
+        df = df.set_index("arr")
+    arr = xr.Dataset.from_dataframe(df)["arr"]
     roundtripped = arr.to_pandas()
-    assert (df["cat"] == roundtripped).all()
-    assert df["cat"].dtype == roundtripped.dtype
-    xr.testing.assert_identical(arr, roundtripped.to_xarray())
+    df_arr_to_test = df.index if is_index else df["arr"]
+    assert (df_arr_to_test == roundtripped).all()
+    # `NumpyExtensionArray` types are not roundtripped, including `StringArray`, which subclasses it.
+    if isinstance(extension_array, pd.arrays.NumpyExtensionArray):  # type: ignore[attr-defined]
+        assert isinstance(arr.data, np.ndarray)
+    else:
+        assert (
+            df_arr_to_test.dtype
+            == (roundtripped.index if is_index else roundtripped).dtype
+        )
+        xr.testing.assert_identical(arr, roundtripped.to_xarray())
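
A minimal sketch of the roundtrip this test now parametrizes, shown for the
categorical case only; the other extension arrays follow the same pattern, and
the column name ``"arr"`` mirrors the test:

.. code:: python

    import pandas as pd
    import xarray as xr

    df = pd.DataFrame({"arr": pd.Categorical(["a", "b", "c"])})
    arr = xr.Dataset.from_dataframe(df)["arr"]
    roundtripped = arr.to_pandas()
    assert (df["arr"] == roundtripped).all()
    assert df["arr"].dtype == roundtripped.dtype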
diff -pruN 2025.03.1-8/properties/test_properties.py 2025.10.1-1/properties/test_properties.py
--- 2025.03.1-8/properties/test_properties.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/properties/test_properties.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,11 +1,15 @@
+import itertools
+
 import pytest
 
 pytest.importorskip("hypothesis")
 
-from hypothesis import given
+import hypothesis.strategies as st
+from hypothesis import given, note
 
 import xarray as xr
 import xarray.testing.strategies as xrst
+from xarray.groupers import find_independent_seasons, season_to_month_tuple
 
 
 @given(attrs=xrst.simple_attrs)
@@ -15,3 +19,45 @@ def test_assert_identical(attrs):
 
     ds = xr.Dataset(attrs=attrs)
     xr.testing.assert_identical(ds, ds.copy(deep=True))
+
+
+@given(
+    roll=st.integers(min_value=0, max_value=12),
+    breaks=st.lists(
+        st.integers(min_value=0, max_value=11), min_size=1, max_size=12, unique=True
+    ),
+)
+def test_property_season_month_tuple(roll, breaks):
+    chars = list("JFMAMJJASOND")
+    months = tuple(range(1, 13))
+
+    rolled_chars = chars[roll:] + chars[:roll]
+    rolled_months = months[roll:] + months[:roll]
+    breaks = sorted(breaks)
+    if breaks[0] != 0:
+        breaks = [0] + breaks
+    if breaks[-1] != 12:
+        breaks = breaks + [12]
+    seasons = tuple(
+        "".join(rolled_chars[start:stop]) for start, stop in itertools.pairwise(breaks)
+    )
+    actual = season_to_month_tuple(seasons)
+    expected = tuple(
+        rolled_months[start:stop] for start, stop in itertools.pairwise(breaks)
+    )
+    assert expected == actual
+
+
+@given(data=st.data(), nmonths=st.integers(min_value=1, max_value=11))
+def test_property_find_independent_seasons(data, nmonths):
+    chars = "JFMAMJJASOND"
+    # if stride > nmonths, then we can't infer season order
+    stride = data.draw(st.integers(min_value=1, max_value=nmonths))
+    chars = chars + chars[:nmonths]
+    seasons = [list(chars[i : i + nmonths]) for i in range(0, 12, stride)]
+    note(seasons)
+    groups = find_independent_seasons(seasons)
+    for group in groups:
+        inds = tuple(itertools.chain(*group.inds))
+        assert len(inds) == len(set(inds))
+        assert len(group.codes) == len(set(group.codes))
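
For orientation, a small sketch of one helper these properties exercise; the
output shown in the comment follows from the property above and is an
assumption, not an excerpt from the test suite:

.. code:: python

    from xarray.groupers import season_to_month_tuple

    season_to_month_tuple(["DJF", "MAM", "JJA", "SON"])
    # -> ((12, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, 11))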
diff -pruN 2025.03.1-8/pyproject.toml 2025.10.1-1/pyproject.toml
--- 2025.03.1-8/pyproject.toml	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/pyproject.toml	2025-10-10 10:38:05.000000000 +0000
@@ -2,12 +2,10 @@
 authors = [{ name = "xarray Developers", email = "xarray@googlegroups.com" }]
 classifiers = [
   "Development Status :: 5 - Production/Stable",
-  "License :: OSI Approved :: Apache Software License",
   "Operating System :: OS Independent",
   "Intended Audience :: Science/Research",
   "Programming Language :: Python",
   "Programming Language :: Python :: 3",
-  "Programming Language :: Python :: 3.10",
   "Programming Language :: Python :: 3.11",
   "Programming Language :: Python :: 3.12",
   "Programming Language :: Python :: 3.13",
@@ -15,12 +13,12 @@ classifiers = [
 ]
 description = "N-D labeled arrays and datasets in Python"
 dynamic = ["version"]
-license = { text = "Apache-2.0" }
+license = "Apache-2.0"
 name = "xarray"
 readme = "README.md"
-requires-python = ">=3.10"
+requires-python = ">=3.11"
 
-dependencies = ["numpy>=1.24", "packaging>=23.2", "pandas>=2.1"]
+dependencies = ["numpy>=1.26", "packaging>=24.1", "pandas>=2.2"]
 
 # We don't encode minimum requirements here (though if we can write a script to
 # generate the text from `min_deps_check.py`, that's welcome...). We do add
@@ -28,23 +26,31 @@ dependencies = ["numpy>=1.24", "packagin
 # note that it's not a direct dependency of xarray.
 
 [project.optional-dependencies]
-accel = ["scipy", "bottleneck", "numbagg", "numba>=0.54", "flox", "opt_einsum"]
+accel = [
+  "scipy>=1.13",
+  "bottleneck",
+  "numbagg>=0.8",
+  "numba>=0.62",  # numba 0.62 added support for numpy 2.3
+  "flox>=0.9",
+  "opt_einsum",
+]
 complete = ["xarray[accel,etc,io,parallel,viz]"]
 io = [
-  "netCDF4",
+  "netCDF4>=1.6.0",
   "h5netcdf",
-  "scipy",
-  'pydap; python_version<"3.10"',
-  "zarr",
+  "pydap",
+  "scipy>=1.13",
+  "zarr>=2.18",
   "fsspec",
   "cftime",
   "pooch",
 ]
-etc = ["sparse"]
+etc = ["sparse>=0.15"]
 parallel = ["dask[complete]"]
-viz = ["cartopy", "matplotlib", "nc-time-axis", "seaborn"]
+viz = ["cartopy>=0.23", "matplotlib", "nc-time-axis", "seaborn"]
 types = [
   "pandas-stubs",
+  "scipy-stubs",
   "types-PyYAML",
   "types-Pygments",
   "types-colorama",
@@ -58,6 +64,7 @@ types = [
   "types-openpyxl",
   "types-python-dateutil",
   "types-pytz",
+  "types-requests",
   "types-setuptools",
 ]
 
@@ -65,7 +72,7 @@ types = [
 dev = [
   "hypothesis",
   "jinja2",
-  "mypy",
+  "mypy==1.18.1",
   "pre-commit",
   "pytest",
   "pytest-cov",
@@ -73,6 +80,7 @@ dev = [
   "pytest-mypy-plugins",
   "pytest-timeout",
   "pytest-xdist",
+  "pytest-asyncio",
   "ruff>=0.8.0",
   "sphinx",
   "sphinx_autosummary_accessors",
@@ -91,10 +99,10 @@ dask = "xarray.namedarray.daskmanager:Da
 
 [build-system]
 build-backend = "setuptools.build_meta"
-requires = ["setuptools>=42", "setuptools-scm>=7"]
+requires = ["setuptools>=77.0.3", "setuptools-scm>=8"]
 
-[tool.setuptools]
-packages = ["xarray"]
+[tool.setuptools.packages.find]
+include = ["xarray*"]
 
 [tool.setuptools_scm]
 fallback_version = "9999"
@@ -174,31 +182,6 @@ module = [
   "xarray.indexes.*",
   "xarray.tests.*",
 ]
-# This then excludes some modules from the above list. (So ideally we remove
-# from here in time...)
-[[tool.mypy.overrides]]
-check_untyped_defs = false
-module = [
-  "xarray.tests.test_coarsen",
-  "xarray.tests.test_coding_times",
-  "xarray.tests.test_combine",
-  "xarray.tests.test_computation",
-  "xarray.tests.test_concat",
-  "xarray.tests.test_coordinates",
-  "xarray.tests.test_dask",
-  "xarray.tests.test_dataarray",
-  "xarray.tests.test_duck_array_ops",
-  "xarray.tests.test_indexing",
-  "xarray.tests.test_merge",
-  "xarray.tests.test_missing",
-  "xarray.tests.test_parallelcompat",
-  "xarray.tests.test_sparse",
-  "xarray.tests.test_ufuncs",
-  "xarray.tests.test_units",
-  "xarray.tests.test_utils",
-  "xarray.tests.test_variable",
-  "xarray.tests.test_weighted",
-]
 
 # Use strict = true whenever namedarray has become standalone. In the meantime
 # don't forget to add all new files related to namedarray here:
@@ -249,41 +232,68 @@ extend-exclude = ["doc", "_typed_ops.pyi
 
 [tool.ruff.lint]
 extend-select = [
-  "F",    # Pyflakes
-  "E",    # pycodestyle errors
-  "W",    # pycodestyle warnings
-  "I",    # isort
-  "UP",   # pyupgrade
+  "YTT",  # flake8-2020
   "B",    # flake8-bugbear
   "C4",   # flake8-comprehensions
+  "ISC",  # flake8-implicit-str-concat
   "PIE",  # flake8-pie
   "TID",  # flake8-tidy-imports (absolute imports)
-  "PGH",  # pygrep-hooks
+  "PYI",  # flake8-pyi
+  "SIM",  # flake8-simplify
+  "FLY",  # flynt
+  "I",    # isort
   "PERF", # Perflint
+  "W",    # pycodestyle warnings
+  "PGH",  # pygrep-hooks
+  "PLC",  # Pylint Convention
+  "PLE",  # Pylint Errors
+  "PLR",  # Pylint Refactor
+  "PLW",  # Pylint Warnings
+  "UP",   # pyupgrade
+  "FURB", # refurb
   "RUF",
 ]
 extend-safe-fixes = [
   "TID252", # absolute imports
 ]
 ignore = [
-  "E402",    # module level import not at top of file
-  "E501",    # line too long - let the formatter worry about that
-  "E731",    # do not assign a lambda expression, use a def
-  "UP007",   # use X | Y for type annotations
   "C40",     # unnecessary generator, comprehension, or literal
   "PIE790",  # unnecessary pass statement
+  "PYI019",  # use `Self` instead of custom TypeVar
+  "PYI041",  # use `float` instead of `int | float`
+  "SIM102",  # use a single `if` statement instead of nested `if` statements
+  "SIM108",  # use ternary operator instead of `if`-`else`-block
+  "SIM117",  # use a single `with` statement instead of nested `with` statements
+  "SIM118",  # use `key in dict` instead of `key in dict.keys()`
+  "SIM300",  # yoda condition detected
   "PERF203", # try-except within a loop incurs performance overhead
+  "E402",    # module level import not at top of file
+  "E731",    # do not assign a lambda expression, use a def
+  "PLC0415", # `import` should be at the top-level of a file
+  "PLC0206", # extracting value from dictionary without calling `.items()`
+  "PLR091",  # too many arguments / branches / statements
+  "PLR2004", # magic value used in comparison
+  "PLW0603", # using the global statement to update is discouraged
+  "PLW0642", # reassigned `self` variable in instance method
+  "PLW1641", # object does not implement `__hash__` method
+  "PLW2901", # `for` loop variable overwritten by assignment target
+  "UP007",   # use X | Y for type annotations
+  "FURB105", # unnecessary empty string passed to `print`
   "RUF001",  # string contains ambiguous unicode character
   "RUF002",  # docstring contains ambiguous acute accent unicode character
   "RUF003",  # comment contains ambiguous no-break space unicode character
-  "RUF005",  # consider upacking operator instead of concatenation
+  "RUF005",  # consider unpacking operator instead of concatenation
   "RUF012",  # mutable class attributes
 ]
 
-
 [tool.ruff.lint.per-file-ignores]
 # don't enforce absolute imports
 "asv_bench/**" = ["TID252"]
+# comparison with itself in tests
+"xarray/tests/**" = ["PLR0124"]
+# looks like ruff bugs
+"xarray/core/_typed_ops.py" = ["PYI034"]
+"xarray/namedarray/_typing.py" = ["PYI018", "PYI046"]
 
 [tool.ruff.lint.isort]
 known-first-party = ["xarray"]
@@ -291,6 +301,8 @@ known-first-party = ["xarray"]
 [tool.ruff.lint.flake8-tidy-imports]
 # Disallow all relative imports.
 ban-relative-imports = "all"
+[tool.ruff.lint.flake8-tidy-imports.banned-api]
+"pandas.api.types.is_extension_array_dtype".msg = "Use xarray.core.utils.is_allowed_extension_array{_dtype} instead.  Only use the banend API if the incoming data has already been sanitized by xarray"
 
 [tool.pytest.ini_options]
 addopts = [
@@ -324,30 +336,13 @@ addopts = [
 
 filterwarnings = [
   "error:::xarray.*",
-  "default:No index created:UserWarning:xarray.core.dataset",
-  "default::UserWarning:xarray.tests.test_coding_times",
-  "default::UserWarning:xarray.tests.test_computation",
-  "default::UserWarning:xarray.tests.test_dataset",
-  "default:`ancestors` has been deprecated:DeprecationWarning:xarray.core.treenode",
-  "default:`iter_lineage` has been deprecated:DeprecationWarning:xarray.core.treenode",
-  "default:`lineage` has been deprecated:DeprecationWarning:xarray.core.treenode",
-  "default:coords should be an ndarray:DeprecationWarning:xarray.tests.test_variable",
-  "default:deallocating CachingFileManager:RuntimeWarning:xarray.backends.*",
-  "default:deallocating CachingFileManager:RuntimeWarning:xarray.backends.netCDF4_",
-  "default:deallocating CachingFileManager:RuntimeWarning:xarray.core.indexing",
-  "default:Failed to decode variable.*NumPy will stop allowing conversion of out-of-bound Python integers to integer arrays:DeprecationWarning",
-  "default:The `interpolation` argument to quantile was renamed to `method`:FutureWarning:xarray.*",
-  "default:invalid value encountered in cast:RuntimeWarning:xarray.core.duck_array_ops",
-  "default:invalid value encountered in cast:RuntimeWarning:xarray.conventions",
-  "default:invalid value encountered in cast:RuntimeWarning:xarray.tests.test_units",
-  "default:invalid value encountered in cast:RuntimeWarning:xarray.tests.test_array_api",
-  "default:NumPy will stop allowing conversion of:DeprecationWarning",
-  "default:shape should be provided:DeprecationWarning:xarray.tests.test_variable",
-  "default:the `pandas.MultiIndex` object:FutureWarning:xarray.tests.test_variable",
-  "default:Using a non-tuple sequence for multidimensional indexing is deprecated:FutureWarning",
-  "default:Duplicate dimension names present:UserWarning:xarray.namedarray.core",
+  # Zarr 2 V3 implementation
+  "default:Zarr-Python is not in alignment with the final V3 specification",
   # TODO: this is raised for vlen-utf8, consolidated metadata, U1 dtype
-  "ignore:is currently not part .* the Zarr version 3 specification.",
+  "default:is currently not part .* the Zarr version 3 specification.",
+  # Zarr V3 data type specifications warnings - very repetitive
+  "ignore:The data type .* does not have a Zarr V3 specification",
+  "ignore:Consolidated metadata is currently not part",
   # TODO: remove once we know how to deal with a changed signature in protocols
   "default:::xarray.tests.test_strategies",
 ]
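
For reviewers, a minimal sketch of what the new ``banned-api`` entry flags
(ruff reports this as TID251); the replacement helpers are quoted from the
configured message and their exact signatures are not shown here:

.. code:: python

    # Flagged by ruff TID251 (banned-api) under the configuration above; the
    # configured message points to xarray.core.utils.is_allowed_extension_array
    # and is_allowed_extension_array_dtype for unsanitized input instead.
    from pandas.api.types import is_extension_array_dtype  # noqa: TID251

    print(is_extension_array_dtype([1, 2, 3]))  # False for a plain list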
@@ -391,10 +386,13 @@ extend-ignore-identifiers-re = [
 [tool.typos.default.extend-words]
 # NumPy function names
 arange = "arange"
+ond = "ond"
+aso = "aso"
 
 # Technical terms
 nd = "nd"
 nin = "nin"
+nclusive = "nclusive" # part of "inclusive" in error messages
 
 # Variable names
 ba = "ba"
diff -pruN 2025.03.1-8/xarray/__init__.py 2025.10.1-1/xarray/__init__.py
--- 2025.03.1-8/xarray/__init__.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/__init__.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,16 +1,17 @@
 from importlib.metadata import version as _version
 
-from xarray import coders, groupers, testing, tutorial, ufuncs
+from xarray import coders, groupers, indexes, testing, tutorial, ufuncs
 from xarray.backends.api import (
     load_dataarray,
     load_dataset,
+    load_datatree,
     open_dataarray,
     open_dataset,
     open_datatree,
     open_groups,
     open_mfdataset,
-    save_mfdataset,
 )
+from xarray.backends.writers import save_mfdataset
 from xarray.backends.zarr import open_zarr
 from xarray.coding.cftime_offsets import cftime_range, date_range, date_range_like
 from xarray.coding.cftimeindex import CFTimeIndex
@@ -28,7 +29,7 @@ from xarray.computation.computation impo
 )
 from xarray.conventions import SerializationWarning, decode_cf
 from xarray.core.common import ALL_DIMS, full_like, ones_like, zeros_like
-from xarray.core.coordinates import Coordinates
+from xarray.core.coordinates import Coordinates, CoordinateValidationError
 from xarray.core.dataarray import DataArray
 from xarray.core.dataset import Dataset
 from xarray.core.datatree import DataTree
@@ -50,7 +51,7 @@ from xarray.core.treenode import (
 )
 from xarray.core.variable import IndexVariable, Variable, as_variable
 from xarray.namedarray.core import NamedArray
-from xarray.structure.alignment import align, broadcast
+from xarray.structure.alignment import AlignmentError, align, broadcast
 from xarray.structure.chunks import unify_chunks
 from xarray.structure.combine import combine_by_coords, combine_nested
 from xarray.structure.concat import concat
@@ -70,6 +71,7 @@ __all__ = (  # noqa: RUF022
     # Sub-packages
     "coders",
     "groupers",
+    "indexes",
     "testing",
     "tutorial",
     "ufuncs",
@@ -95,6 +97,7 @@ __all__ = (  # noqa: RUF022
     "infer_freq",
     "load_dataarray",
     "load_dataset",
+    "load_datatree",
     "map_blocks",
     "map_over_datasets",
     "merge",
@@ -128,6 +131,8 @@ __all__ = (  # noqa: RUF022
     "NamedArray",
     "Variable",
     # Exceptions
+    "AlignmentError",
+    "CoordinateValidationError",
     "InvalidTreeError",
     "MergeError",
     "NotFoundInTreeError",
diff -pruN 2025.03.1-8/xarray/backends/api.py 2025.10.1-1/xarray/backends/api.py
--- 2025.03.1-8/xarray/backends/api.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/backends/api.py	2025-10-10 10:38:05.000000000 +0000
@@ -3,45 +3,36 @@ from __future__ import annotations
 import os
 from collections.abc import (
     Callable,
-    Hashable,
     Iterable,
     Mapping,
-    MutableMapping,
     Sequence,
 )
 from functools import partial
-from io import BytesIO
-from numbers import Number
 from typing import (
     TYPE_CHECKING,
     Any,
-    Final,
     Literal,
+    TypeVar,
     Union,
     cast,
-    overload,
 )
 
-import numpy as np
-
-from xarray import backends, conventions
 from xarray.backends import plugins
 from xarray.backends.common import (
-    AbstractDataStore,
-    ArrayWriter,
+    T_PathFileOrDataStore,
     _find_absolute_paths,
     _normalize_path,
 )
-from xarray.backends.locks import _get_scheduler
 from xarray.coders import CFDatetimeCoder, CFTimedeltaCoder
-from xarray.core import indexing
+from xarray.core import dtypes, indexing
+from xarray.core.coordinates import Coordinates
 from xarray.core.dataarray import DataArray
 from xarray.core.dataset import Dataset
 from xarray.core.datatree import DataTree
 from xarray.core.indexes import Index
 from xarray.core.treenode import group_subtrees
-from xarray.core.types import NetcdfWriteModes, ZarrWriteModes
-from xarray.core.utils import is_remote_uri
+from xarray.core.types import ReadBuffer
+from xarray.core.utils import emit_user_level_warning, is_remote_uri
 from xarray.namedarray.daskmanager import DaskManager
 from xarray.namedarray.parallelcompat import guess_chunkmanager
 from xarray.structure.chunks import _get_chunk, _maybe_chunk
@@ -50,6 +41,13 @@ from xarray.structure.combine import (
     _nested_combine,
     combine_by_coords,
 )
+from xarray.util.deprecation_helpers import (
+    _COMPAT_DEFAULT,
+    _COORDS_DEFAULT,
+    _DATA_VARS_DEFAULT,
+    _JOIN_DEFAULT,
+    CombineKwargDefault,
+)
 
 if TYPE_CHECKING:
     try:
@@ -61,17 +59,16 @@ if TYPE_CHECKING:
     from xarray.core.types import (
         CombineAttrsOptions,
         CompatOptions,
+        ErrorOptionsWithWarn,
         JoinOptions,
         NestedSequence,
-        ReadBuffer,
         T_Chunks,
-        ZarrStoreLike,
     )
 
     T_NetcdfEngine = Literal["netcdf4", "scipy", "h5netcdf"]
     T_Engine = Union[
         T_NetcdfEngine,
-        Literal["pydap", "zarr"],
+        Literal["pydap", "zarr"],  # noqa: PYI051
         type[BackendEntrypoint],
         str,  # no nice typing support for custom backends
         None,
@@ -83,154 +80,6 @@ if TYPE_CHECKING:
 DATAARRAY_NAME = "__xarray_dataarray_name__"
 DATAARRAY_VARIABLE = "__xarray_dataarray_variable__"
 
-ENGINES = {
-    "netcdf4": backends.NetCDF4DataStore.open,
-    "scipy": backends.ScipyDataStore,
-    "pydap": backends.PydapDataStore.open,
-    "h5netcdf": backends.H5NetCDFStore.open,
-    "zarr": backends.ZarrStore.open_group,
-}
-
-
-def _get_default_engine_remote_uri() -> Literal["netcdf4", "pydap"]:
-    engine: Literal["netcdf4", "pydap"]
-    try:
-        import netCDF4  # noqa: F401
-
-        engine = "netcdf4"
-    except ImportError:  # pragma: no cover
-        try:
-            import pydap  # noqa: F401
-
-            engine = "pydap"
-        except ImportError as err:
-            raise ValueError(
-                "netCDF4 or pydap is required for accessing remote datasets via OPeNDAP"
-            ) from err
-    return engine
-
-
-def _get_default_engine_gz() -> Literal["scipy"]:
-    try:
-        import scipy  # noqa: F401
-
-        engine: Final = "scipy"
-    except ImportError as err:  # pragma: no cover
-        raise ValueError("scipy is required for accessing .gz files") from err
-    return engine
-
-
-def _get_default_engine_netcdf() -> Literal["netcdf4", "scipy"]:
-    engine: Literal["netcdf4", "scipy"]
-    try:
-        import netCDF4  # noqa: F401
-
-        engine = "netcdf4"
-    except ImportError:  # pragma: no cover
-        try:
-            import scipy.io.netcdf  # noqa: F401
-
-            engine = "scipy"
-        except ImportError as err:
-            raise ValueError(
-                "cannot read or write netCDF files without "
-                "netCDF4-python or scipy installed"
-            ) from err
-    return engine
-
-
-def _get_default_engine(path: str, allow_remote: bool = False) -> T_NetcdfEngine:
-    if allow_remote and is_remote_uri(path):
-        return _get_default_engine_remote_uri()  # type: ignore[return-value]
-    elif path.endswith(".gz"):
-        return _get_default_engine_gz()
-    else:
-        return _get_default_engine_netcdf()
-
-
-def _validate_dataset_names(dataset: Dataset) -> None:
-    """DataArray.name and Dataset keys must be a string or None"""
-
-    def check_name(name: Hashable):
-        if isinstance(name, str):
-            if not name:
-                raise ValueError(
-                    f"Invalid name {name!r} for DataArray or Dataset key: "
-                    "string must be length 1 or greater for "
-                    "serialization to netCDF or zarr files"
-                )
-        elif name is not None:
-            raise TypeError(
-                f"Invalid name {name!r} for DataArray or Dataset key: "
-                "must be either a string or None for serialization to netCDF "
-                "or zarr files"
-            )
-
-    for k in dataset.variables:
-        check_name(k)
-
-
-def _validate_attrs(dataset, engine, invalid_netcdf=False):
-    """`attrs` must have a string key and a value which is either: a number,
-    a string, an ndarray, a list/tuple of numbers/strings, or a numpy.bool_.
-
-    Notes
-    -----
-    A numpy.bool_ is only allowed when using the h5netcdf engine with
-    `invalid_netcdf=True`.
-    """
-
-    valid_types = (str, Number, np.ndarray, np.number, list, tuple, bytes)
-    if invalid_netcdf and engine == "h5netcdf":
-        valid_types += (np.bool_,)
-
-    def check_attr(name, value, valid_types):
-        if isinstance(name, str):
-            if not name:
-                raise ValueError(
-                    f"Invalid name for attr {name!r}: string must be "
-                    "length 1 or greater for serialization to "
-                    "netCDF files"
-                )
-        else:
-            raise TypeError(
-                f"Invalid name for attr: {name!r} must be a string for "
-                "serialization to netCDF files"
-            )
-
-        if not isinstance(value, valid_types):
-            raise TypeError(
-                f"Invalid value for attr {name!r}: {value!r}. For serialization to "
-                "netCDF files, its value must be of one of the following types: "
-                f"{', '.join([vtype.__name__ for vtype in valid_types])}"
-            )
-
-        if isinstance(value, bytes) and engine == "h5netcdf":
-            try:
-                value.decode("utf-8")
-            except UnicodeDecodeError as e:
-                raise ValueError(
-                    f"Invalid value provided for attribute '{name!r}': {value!r}. "
-                    "Only binary data derived from UTF-8 encoded strings is allowed "
-                    f"for the '{engine}' engine. Consider using the 'netcdf4' engine."
-                ) from e
-
-            if b"\x00" in value:
-                raise ValueError(
-                    f"Invalid value provided for attribute '{name!r}': {value!r}. "
-                    f"Null characters are not permitted for the '{engine}' engine. "
-                    "Consider using the 'netcdf4' engine."
-                )
-
-    # Check attrs on the dataset itself
-    for k, v in dataset.attrs.items():
-        check_attr(k, v, valid_types)
-
-    # Check attrs on each variable within the dataset
-    for variable in dataset.variables.values():
-        for k, v in variable.attrs.items():
-            check_attr(k, v, valid_types)
-
 
 def _resolve_decoders_kwargs(decode_cf, open_backend_dataset_parameters, **decoders):
     for d in list(decoders):
@@ -270,21 +119,27 @@ def _protect_dataset_variables_inplace(d
 
 def _protect_datatree_variables_inplace(tree: DataTree, cache: bool) -> None:
     for node in tree.subtree:
-        _protect_dataset_variables_inplace(node, cache)
+        _protect_dataset_variables_inplace(node.dataset, cache)
 
 
-def _finalize_store(write, store):
+def _finalize_store(writes, store):
     """Finalize this store by explicitly syncing and closing"""
-    del write  # ensure writing is done first
+    del writes  # ensure writing is done first
     store.close()
 
 
+def delayed_close_after_writes(writes, store):
+    import dask
+
+    return dask.delayed(_finalize_store)(writes, store)
+
+
 def _multi_file_closer(closers):
     for closer in closers:
         closer()
 
 
-def load_dataset(filename_or_obj, **kwargs) -> Dataset:
+def load_dataset(filename_or_obj: T_PathFileOrDataStore, **kwargs) -> Dataset:
     """Open, load into memory, and close a Dataset from a file or file-like
     object.
 
@@ -310,7 +165,7 @@ def load_dataset(filename_or_obj, **kwar
         return ds.load()
 
 
-def load_dataarray(filename_or_obj, **kwargs):
+def load_dataarray(filename_or_obj: T_PathFileOrDataStore, **kwargs) -> DataArray:
     """Open, load into memory, and close a DataArray from a file or file-like
     object containing a single data variable.
 
@@ -336,6 +191,32 @@ def load_dataarray(filename_or_obj, **kw
         return da.load()
 
 
+def load_datatree(filename_or_obj: T_PathFileOrDataStore, **kwargs) -> DataTree:
+    """Open, load into memory, and close a DataTree from a file or file-like
+    object.
+
+    This is a thin wrapper around :py:meth:`~xarray.open_datatree`. It differs
+    from `open_datatree` in that it loads the DataTree into memory, closes the
+    file, and returns the DataTree. In contrast, `open_datatree` keeps the file
+    handle open and lazy loads its contents. All parameters are passed directly
+    to `open_datatree`. See that documentation for further details.
+
+    Returns
+    -------
+    datatree : DataTree
+        The newly created DataTree.
+
+    See Also
+    --------
+    open_datatree
+    """
+    if "cache" in kwargs:
+        raise TypeError("cache has no effect in this context")
+
+    with open_datatree(filename_or_obj, **kwargs) as dt:
+        return dt.load()
+
+
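
A usage sketch for the new helper; the filename is hypothetical:

.. code:: python

    import xarray as xr

    # eagerly load every group and close the underlying file handle
    tree = xr.load_datatree("example_groups.nc")  # hypothetical path

    # roughly equivalent to:
    # with xr.open_datatree("example_groups.nc") as dt:
    #     tree = dt.load()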
 def _chunk_ds(
     backend_ds,
     filename_or_obj,
@@ -378,6 +259,15 @@ def _chunk_ds(
     return backend_ds._replace(variables)
 
 
+def _maybe_create_default_indexes(ds):
+    to_index = {
+        name: coord.variable
+        for name, coord in ds.coords.items()
+        if coord.dims == (name,) and name not in ds.xindexes
+    }
+    return ds.assign_coords(Coordinates(to_index))
+
+
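
Roughly, this helper re-wraps 1-D coordinates that are named after their own
dimension but carry no index, so that constructing ``Coordinates`` builds the
default (pandas) index for them. A sketch applying the same selection to an
arbitrary dataset; on an ordinary in-memory dataset ``to_index`` is usually
empty because default indexes already exist, whereas a freshly decoded backend
dataset may still be missing them:

.. code:: python

    import xarray as xr
    from xarray.core.coordinates import Coordinates

    ds = xr.Dataset({"a": ("x", [1.0, 2.0, 3.0])}, coords={"x": [10, 20, 30]})
    to_index = {
        name: coord.variable
        for name, coord in ds.coords.items()
        if coord.dims == (name,) and name not in ds.xindexes
    }
    ds_indexed = ds.assign_coords(Coordinates(to_index))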
 def _dataset_from_backend_dataset(
     backend_ds,
     filename_or_obj,
@@ -388,6 +278,7 @@ def _dataset_from_backend_dataset(
     inline_array,
     chunked_array_type,
     from_array_kwargs,
+    create_default_indexes,
     **extra_tokens,
 ):
     if not isinstance(chunks, int | dict) and chunks not in {None, "auto"}:
@@ -396,11 +287,15 @@ def _dataset_from_backend_dataset(
         )
 
     _protect_dataset_variables_inplace(backend_ds, cache)
-    if chunks is None:
-        ds = backend_ds
+
+    if create_default_indexes:
+        ds = _maybe_create_default_indexes(backend_ds)
     else:
+        ds = backend_ds
+
+    if chunks is not None:
         ds = _chunk_ds(
-            backend_ds,
+            ds,
             filename_or_obj,
             engine,
             chunks,
@@ -433,6 +328,7 @@ def _datatree_from_backend_datatree(
     inline_array,
     chunked_array_type,
     from_array_kwargs,
+    create_default_indexes,
     **extra_tokens,
 ):
     if not isinstance(chunks, int | dict) and chunks not in {None, "auto"}:
@@ -441,9 +337,11 @@ def _datatree_from_backend_datatree(
         )
 
     _protect_datatree_variables_inplace(backend_tree, cache)
-    if chunks is None:
-        tree = backend_tree
+    if create_default_indexes:
+        tree = backend_tree.map_over_datasets(_maybe_create_default_indexes)
     else:
+        tree = backend_tree
+    if chunks is not None:
         tree = DataTree.from_dict(
             {
                 path: _chunk_ds(
@@ -458,11 +356,12 @@ def _datatree_from_backend_datatree(
                     node=path,
                     **extra_tokens,
                 )
-                for path, [node] in group_subtrees(backend_tree)
+                for path, [node] in group_subtrees(tree)
             },
-            name=backend_tree.name,
+            name=tree.name,
         )
 
+    if create_default_indexes or chunks is not None:
         for path, [node] in group_subtrees(backend_tree):
             tree[path].set_close(node._close)
 
@@ -477,25 +376,24 @@ def _datatree_from_backend_datatree(
 
 
 def open_dataset(
-    filename_or_obj: str | os.PathLike[Any] | ReadBuffer | AbstractDataStore,
+    filename_or_obj: T_PathFileOrDataStore,
     *,
     engine: T_Engine = None,
     chunks: T_Chunks = None,
     cache: bool | None = None,
     decode_cf: bool | None = None,
     mask_and_scale: bool | Mapping[str, bool] | None = None,
-    decode_times: bool
-    | CFDatetimeCoder
-    | Mapping[str, bool | CFDatetimeCoder]
-    | None = None,
-    decode_timedelta: bool
-    | CFTimedeltaCoder
-    | Mapping[str, bool | CFTimedeltaCoder]
-    | None = None,
+    decode_times: (
+        bool | CFDatetimeCoder | Mapping[str, bool | CFDatetimeCoder] | None
+    ) = None,
+    decode_timedelta: (
+        bool | CFTimedeltaCoder | Mapping[str, bool | CFTimedeltaCoder] | None
+    ) = None,
     use_cftime: bool | Mapping[str, bool] | None = None,
     concat_characters: bool | Mapping[str, bool] | None = None,
     decode_coords: Literal["coordinates", "all"] | bool | None = None,
     drop_variables: str | Iterable[str] | None = None,
+    create_default_indexes: bool = True,
     inline_array: bool = False,
     chunked_array_type: str | None = None,
     from_array_kwargs: dict[str, Any] | None = None,
@@ -506,26 +404,30 @@ def open_dataset(
 
     Parameters
     ----------
-    filename_or_obj : str, Path, file-like or DataStore
+    filename_or_obj : str, Path, file-like, bytes, memoryview or DataStore
         Strings and Path objects are interpreted as a path to a netCDF file
         or an OpenDAP URL and opened with python-netCDF4, unless the filename
         ends with .gz, in which case the file is gunzipped and opened with
-        scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
-        objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
+        scipy.io.netcdf (only netCDF3 supported). Bytes, memoryview and
+        file-like objects are opened by scipy.io.netcdf (netCDF3) or h5netcdf
+        (netCDF4).
     engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "zarr", None}\
         , installed backend \
         or subclass of xarray.backends.BackendEntrypoint, optional
         Engine to use when reading files. If not provided, the default engine
-        is chosen based on available dependencies, with a preference for
-        "netcdf4". A custom backend class (a subclass of ``BackendEntrypoint``)
-        can also be used.
+        is chosen based on available dependencies, by default preferring
+        "netcdf4" over "h5netcdf" over "scipy" (customizable via
+        ``netcdf_engine_order`` in ``xarray.set_options()``). A custom backend
+        class (a subclass of ``BackendEntrypoint``) can also be used.
     chunks : int, dict, 'auto' or None, default: None
         If provided, used to load the data into dask arrays.
 
         - ``chunks="auto"`` will use dask ``auto`` chunking taking into account the
           engine preferred chunks.
-        - ``chunks=None`` skips using dask, which is generally faster for
-          small arrays.
+        - ``chunks=None`` skips using dask. This uses xarray's internally private
+          :ref:`lazy indexing classes <internal design.lazy indexing>`,
+          but data is eagerly loaded into memory as numpy arrays when accessed.
+          This can be more efficient for smaller arrays or when large arrays are sliced before computation.
         - ``chunks=-1`` loads the data with dask using a single chunk for all arrays.
         - ``chunks={}`` loads the data with dask using the engine's preferred chunk
           size, generally identical to the format's chunk size. If not available, a
@@ -609,6 +511,13 @@ def open_dataset(
         A variable or list of variables to exclude from being parsed from the
         dataset. This may be useful to drop variables with problems or
         inconsistent values.
+    create_default_indexes : bool, default: True
+        If True, create pandas indexes for :term:`dimension coordinates <dimension coordinate>`,
+        which loads the coordinate data into memory. Set it to False if you want to avoid loading
+        data into memory.
+
+        Note that backends can still choose to create other indexes. If you want to control that,
+        please refer to the backend's documentation.
     inline_array: bool, default: False
         How to include the array in the dask task graph.
         By default(``inline_array=False``) the array is included in a task by
@@ -701,6 +610,7 @@ def open_dataset(
         chunked_array_type,
         from_array_kwargs,
         drop_variables=drop_variables,
+        create_default_indexes=create_default_indexes,
         **decoders,
         **kwargs,
     )
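
A usage sketch for the new ``create_default_indexes`` keyword; the filename is
hypothetical, and whether any indexes remain depends on the backend:

.. code:: python

    import xarray as xr

    # skip building pandas indexes for dimension coordinates, so their data
    # is not loaded eagerly (hypothetical file)
    ds = xr.open_dataset("example.nc", create_default_indexes=False)
    print(list(ds.xindexes))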
@@ -708,22 +618,22 @@ def open_dataset(
 
 
 def open_dataarray(
-    filename_or_obj: str | os.PathLike[Any] | ReadBuffer | AbstractDataStore,
+    filename_or_obj: T_PathFileOrDataStore,
     *,
-    engine: T_Engine | None = None,
-    chunks: T_Chunks | None = None,
+    engine: T_Engine = None,
+    chunks: T_Chunks = None,
     cache: bool | None = None,
     decode_cf: bool | None = None,
     mask_and_scale: bool | None = None,
-    decode_times: bool
-    | CFDatetimeCoder
-    | Mapping[str, bool | CFDatetimeCoder]
-    | None = None,
+    decode_times: (
+        bool | CFDatetimeCoder | Mapping[str, bool | CFDatetimeCoder] | None
+    ) = None,
     decode_timedelta: bool | CFTimedeltaCoder | None = None,
     use_cftime: bool | None = None,
     concat_characters: bool | None = None,
     decode_coords: Literal["coordinates", "all"] | bool | None = None,
     drop_variables: str | Iterable[str] | None = None,
+    create_default_indexes: bool = True,
     inline_array: bool = False,
     chunked_array_type: str | None = None,
     from_array_kwargs: dict[str, Any] | None = None,
@@ -738,25 +648,30 @@ def open_dataarray(
 
     Parameters
     ----------
-    filename_or_obj : str, Path, file-like or DataStore
+    filename_or_obj : str, Path, file-like, bytes, memoryview or DataStore
         Strings and Path objects are interpreted as a path to a netCDF file
         or an OpenDAP URL and opened with python-netCDF4, unless the filename
         ends with .gz, in which case the file is gunzipped and opened with
-        scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
-        objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
+        scipy.io.netcdf (only netCDF3 supported). Bytes, memoryview and
+        file-like objects are opened by scipy.io.netcdf (netCDF3) or h5netcdf
+        (netCDF4).
     engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "zarr", None}\
         , installed backend \
         or subclass of xarray.backends.BackendEntrypoint, optional
         Engine to use when reading files. If not provided, the default engine
-        is chosen based on available dependencies, with a preference for
-        "netcdf4".
+        is chosen based on available dependencies, by default preferring
+        "netcdf4" over "h5netcdf" over "scipy" (customizable via
+        ``netcdf_engine_order`` in ``xarray.set_options()``). A custom backend
+        class (a subclass of ``BackendEntrypoint``) can also be used.
     chunks : int, dict, 'auto' or None, default: None
         If provided, used to load the data into dask arrays.
 
         - ``chunks='auto'`` will use dask ``auto`` chunking taking into account the
           engine preferred chunks.
-        - ``chunks=None`` skips using dask, which is generally faster for
-          small arrays.
+        - ``chunks=None`` skips using dask. This uses xarray's internally private
+          :ref:`lazy indexing classes <internal design.lazy indexing>`,
+          but data is eagerly loaded into memory as numpy arrays when accessed.
+          This can be more efficient for smaller arrays, though results may vary.
         - ``chunks=-1`` loads the data with dask using a single chunk for all arrays.
         - ``chunks={}`` loads the data with dask using engine preferred chunks if
           exposed by the backend, otherwise with a single chunk for all arrays.
@@ -832,6 +747,13 @@ def open_dataarray(
         A variable or list of variables to exclude from being parsed from the
         dataset. This may be useful to drop variables with problems or
         inconsistent values.
+    create_default_indexes : bool, default: True
+        If True, create pandas indexes for :term:`dimension coordinates <dimension coordinate>`,
+        which loads the coordinate data into memory. Set it to False if you want to avoid loading
+        data into memory.
+
+        Note that backends can still choose to create other indexes. If you want to control that,
+        please refer to the backend's documentation.
     inline_array: bool, default: False
         How to include the array in the dask task graph.
         By default(``inline_array=False``) the array is included in a task by
@@ -889,6 +811,7 @@ def open_dataarray(
         chunks=chunks,
         cache=cache,
         drop_variables=drop_variables,
+        create_default_indexes=create_default_indexes,
         inline_array=inline_array,
         chunked_array_type=chunked_array_type,
         from_array_kwargs=from_array_kwargs,
@@ -926,25 +849,24 @@ def open_dataarray(
 
 
 def open_datatree(
-    filename_or_obj: str | os.PathLike[Any] | ReadBuffer | AbstractDataStore,
+    filename_or_obj: T_PathFileOrDataStore,
     *,
     engine: T_Engine = None,
     chunks: T_Chunks = None,
     cache: bool | None = None,
     decode_cf: bool | None = None,
     mask_and_scale: bool | Mapping[str, bool] | None = None,
-    decode_times: bool
-    | CFDatetimeCoder
-    | Mapping[str, bool | CFDatetimeCoder]
-    | None = None,
-    decode_timedelta: bool
-    | CFTimedeltaCoder
-    | Mapping[str, bool | CFTimedeltaCoder]
-    | None = None,
+    decode_times: (
+        bool | CFDatetimeCoder | Mapping[str, bool | CFDatetimeCoder] | None
+    ) = None,
+    decode_timedelta: (
+        bool | CFTimedeltaCoder | Mapping[str, bool | CFTimedeltaCoder] | None
+    ) = None,
     use_cftime: bool | Mapping[str, bool] | None = None,
     concat_characters: bool | Mapping[str, bool] | None = None,
     decode_coords: Literal["coordinates", "all"] | bool | None = None,
     drop_variables: str | Iterable[str] | None = None,
+    create_default_indexes: bool = True,
     inline_array: bool = False,
     chunked_array_type: str | None = None,
     from_array_kwargs: dict[str, Any] | None = None,
@@ -956,21 +878,26 @@ def open_datatree(
 
     Parameters
     ----------
-    filename_or_obj : str, Path, file-like, or DataStore
-        Strings and Path objects are interpreted as a path to a netCDF file or Zarr store.
+    filename_or_obj : str, Path, file-like, bytes or DataStore
+        Strings and Path objects are interpreted as a path to a netCDF file or
+        Zarr store. Bytes and memoryview objects are interpreted as file
+        contents.
     engine : {"netcdf4", "h5netcdf", "zarr", None}, \
              installed backend or xarray.backends.BackendEntrypoint, optional
         Engine to use when reading files. If not provided, the default engine
-        is chosen based on available dependencies, with a preference for
-        "netcdf4". A custom backend class (a subclass of ``BackendEntrypoint``)
-        can also be used.
+        is chosen based on available dependencies, by default preferring
+        "h5netcdf" over "netcdf4" (customizable via ``netcdf_engine_order`` in
+        ``xarray.set_options()``). A custom backend class (a subclass of
+        ``BackendEntrypoint``) can also be used.
     chunks : int, dict, 'auto' or None, default: None
         If provided, used to load the data into dask arrays.
 
         - ``chunks="auto"`` will use dask ``auto`` chunking taking into account the
           engine preferred chunks.
-        - ``chunks=None`` skips using dask, which is generally faster for
-          small arrays.
+        - ``chunks=None`` skips using dask. This uses xarray's internally private
+          :ref:`lazy indexing classes <internal design.lazy indexing>`,
+          but data is eagerly loaded into memory as numpy arrays when accessed.
+          This can be more efficient for smaller arrays, though results may vary.
         - ``chunks=-1`` loads the data with dask using a single chunk for all arrays.
         - ``chunks={}`` loads the data with dask using the engine's preferred chunk
           size, generally identical to the format's chunk size. If not available, a
@@ -1054,6 +981,13 @@ def open_datatree(
         A variable or list of variables to exclude from being parsed from the
         dataset. This may be useful to drop variables with problems or
         inconsistent values.
+    create_default_indexes : bool, default: True
+        If True, create pandas indexes for :term:`dimension coordinates <dimension coordinate>`,
+        which loads the coordinate data into memory. Set it to False if you want to avoid loading
+        data into memory.
+
+        Note that backends can still choose to create other indexes. If you want to control that,
+        please refer to the backend's documentation.
     inline_array: bool, default: False
         How to include the array in the dask task graph.
         By default(``inline_array=False``) the array is included in a task by
@@ -1110,7 +1044,7 @@ def open_datatree(
         kwargs.update(backend_kwargs)
 
     if engine is None:
-        engine = plugins.guess_engine(filename_or_obj)
+        engine = plugins.guess_engine(filename_or_obj, must_support_groups=True)
 
     if from_array_kwargs is None:
         from_array_kwargs = {}
@@ -1147,6 +1081,7 @@ def open_datatree(
         chunked_array_type,
         from_array_kwargs,
         drop_variables=drop_variables,
+        create_default_indexes=create_default_indexes,
         **decoders,
         **kwargs,
     )
@@ -1155,25 +1090,24 @@ def open_datatree(
 
 
 def open_groups(
-    filename_or_obj: str | os.PathLike[Any] | ReadBuffer | AbstractDataStore,
+    filename_or_obj: T_PathFileOrDataStore,
     *,
     engine: T_Engine = None,
     chunks: T_Chunks = None,
     cache: bool | None = None,
     decode_cf: bool | None = None,
     mask_and_scale: bool | Mapping[str, bool] | None = None,
-    decode_times: bool
-    | CFDatetimeCoder
-    | Mapping[str, bool | CFDatetimeCoder]
-    | None = None,
-    decode_timedelta: bool
-    | CFTimedeltaCoder
-    | Mapping[str, bool | CFTimedeltaCoder]
-    | None = None,
+    decode_times: (
+        bool | CFDatetimeCoder | Mapping[str, bool | CFDatetimeCoder] | None
+    ) = None,
+    decode_timedelta: (
+        bool | CFTimedeltaCoder | Mapping[str, bool | CFTimedeltaCoder] | None
+    ) = None,
     use_cftime: bool | Mapping[str, bool] | None = None,
     concat_characters: bool | Mapping[str, bool] | None = None,
     decode_coords: Literal["coordinates", "all"] | bool | None = None,
     drop_variables: str | Iterable[str] | None = None,
+    create_default_indexes: bool = True,
     inline_array: bool = False,
     chunked_array_type: str | None = None,
     from_array_kwargs: dict[str, Any] | None = None,
@@ -1189,21 +1123,27 @@ def open_groups(
 
     Parameters
     ----------
-    filename_or_obj : str, Path, file-like, or DataStore
-        Strings and Path objects are interpreted as a path to a netCDF file or Zarr store.
+    filename_or_obj : str, Path, file-like, bytes, memoryview or DataStore
+        Strings and Path objects are interpreted as a path to a netCDF file or
+        Zarr store. Bytes and memoryview objects are interpreted as file
+        contents.
     engine : {"netcdf4", "h5netcdf", "zarr", None}, \
              installed backend or xarray.backends.BackendEntrypoint, optional
         Engine to use when reading files. If not provided, the default engine
-        is chosen based on available dependencies, with a preference for
-        "netcdf4". A custom backend class (a subclass of ``BackendEntrypoint``)
+        is chosen based on available dependencies, by default preferring
+        "h5netcdf" over "netcdf4" (customizable via ``netcdf_engine_order`` in
+        ``xarray.set_options()``). A custom backend class (a subclass of
+        ``BackendEntrypoint``) can also be used.
-        can also be used.
     chunks : int, dict, 'auto' or None, default: None
         If provided, used to load the data into dask arrays.
 
         - ``chunks="auto"`` will use dask ``auto`` chunking taking into account the
           engine preferred chunks.
-        - ``chunks=None`` skips using dask, which is generally faster for
-          small arrays.
+        - ``chunks=None`` skips using dask. This uses xarray's internally private
+          :ref:`lazy indexing classes <internal design.lazy indexing>`,
+          but data is eagerly loaded into memory as numpy arrays when accessed.
+          This can be more efficient for smaller arrays, though results may vary.
         - ``chunks=-1`` loads the data with dask using a single chunk for all arrays.
         - ``chunks={}`` loads the data with dask using the engine's preferred chunk
           size, generally identical to the format's chunk size. If not available, a
@@ -1285,6 +1225,13 @@ def open_groups(
         A variable or list of variables to exclude from being parsed from the
         dataset. This may be useful to drop variables with problems or
         inconsistent values.
+    create_default_indexes : bool, default: True
+        If True, create pandas indexes for :term:`dimension coordinates <dimension coordinate>`,
+        which loads the coordinate data into memory. Set it to False if you want to avoid loading
+        data into memory.
+
+        Note that backends can still choose to create other indexes. If you want to control that,
+        please refer to the backend's documentation.
     inline_array: bool, default: False
         How to include the array in the dask task graph.
         By default(``inline_array=False``) the array is included in a task by
@@ -1342,7 +1289,7 @@ def open_groups(
         kwargs.update(backend_kwargs)
 
     if engine is None:
-        engine = plugins.guess_engine(filename_or_obj)
+        engine = plugins.guess_engine(filename_or_obj, must_support_groups=True)
 
     if from_array_kwargs is None:
         from_array_kwargs = {}
@@ -1380,6 +1327,7 @@ def open_groups(
             chunked_array_type,
             from_array_kwargs,
             drop_variables=drop_variables,
+            create_default_indexes=create_default_indexes,
             **decoders,
             **kwargs,
         )
@@ -1389,12 +1337,33 @@ def open_groups(
     return groups
 
 
+_FLike = TypeVar("_FLike", bound=Union[str, ReadBuffer])
+
+
+def _remove_path(
+    paths: NestedSequence[_FLike], paths_to_remove: set[_FLike]
+) -> NestedSequence[_FLike]:
+    # Initialize an empty list to store the result
+    result: list[Union[_FLike, NestedSequence[_FLike]]] = []
+
+    for item in paths:
+        if isinstance(item, list):
+            # If the current item is a list, recursively call _remove_path on it
+            nested_result = _remove_path(item, paths_to_remove)
+            if nested_result:  # only keep branches left non-empty after filtering
+                result.append(nested_result)
+        elif item not in paths_to_remove:
+            # Add the item to the result if it is not in the set of elements to remove
+            result.append(item)
+
+    return result
+
+
 def open_mfdataset(
-    paths: str
-    | os.PathLike
-    | ReadBuffer
-    | NestedSequence[str | os.PathLike | ReadBuffer],
-    chunks: T_Chunks | None = None,
+    paths: (
+        str | os.PathLike | ReadBuffer | NestedSequence[str | os.PathLike | ReadBuffer]
+    ),
+    chunks: T_Chunks = None,
     concat_dim: (
         str
         | DataArray
@@ -1404,16 +1373,19 @@ def open_mfdataset(
         | Sequence[Index]
         | None
     ) = None,
-    compat: CompatOptions = "no_conflicts",
+    compat: CompatOptions | CombineKwargDefault = _COMPAT_DEFAULT,
     preprocess: Callable[[Dataset], Dataset] | None = None,
-    engine: T_Engine | None = None,
-    data_vars: Literal["all", "minimal", "different"] | list[str] = "all",
-    coords="different",
+    engine: T_Engine = None,
+    data_vars: (
+        Literal["all", "minimal", "different"] | None | list[str] | CombineKwargDefault
+    ) = _DATA_VARS_DEFAULT,
+    coords=_COORDS_DEFAULT,
     combine: Literal["by_coords", "nested"] = "by_coords",
     parallel: bool = False,
-    join: JoinOptions = "outer",
+    join: JoinOptions | CombineKwargDefault = _JOIN_DEFAULT,
     attrs_file: str | os.PathLike | None = None,
     combine_attrs: CombineAttrsOptions = "override",
+    errors: ErrorOptionsWithWarn = "raise",
     **kwargs,
 ) -> Dataset:
     """Open multiple files as a single dataset.
@@ -1477,8 +1449,10 @@ def open_mfdataset(
         , installed backend \
         or subclass of xarray.backends.BackendEntrypoint, optional
         Engine to use when reading files. If not provided, the default engine
-        is chosen based on available dependencies, with a preference for
-        "netcdf4".
+        is chosen based on available dependencies, by default preferring
+        "netcdf4" over "h5netcdf" over "scipy" (customizable via
+        ``netcdf_engine_order`` in ``xarray.set_options()``). A custom backend
+        class (a subclass of ``BackendEntrypoint``) can also be used.
     data_vars : {"minimal", "different", "all"} or list of str, default: "all"
         These data variables will be concatenated together:
           * "minimal": Only data variables in which the dimension already
@@ -1540,6 +1514,12 @@ def open_mfdataset(
 
         If a callable, it must expect a sequence of ``attrs`` dicts and a context object
         as its only parameters.
+    errors : {"raise", "warn", "ignore"}, default: "raise"
+        String indicating how to handle errors when opening datasets.
+
+        - "raise": an invalid dataset will raise an exception.
+        - "warn": a warning will be issued for each invalid dataset.
+        - "ignore": invalid datasets will be ignored.
     **kwargs : optional
         Additional arguments passed on to :py:func:`xarray.open_dataset`. For an
         overview of some of the possible options, see the documentation of
@@ -1632,7 +1612,32 @@ def open_mfdataset(
         open_ = open_dataset
         getattr_ = getattr
 
-    datasets = [open_(p, **open_kwargs) for p in paths1d]
+    if errors not in ("raise", "warn", "ignore"):
+        raise ValueError(
+            f"'errors' must be 'raise', 'warn' or 'ignore', got '{errors}'"
+        )
+
+    datasets = []
+    invalid_paths = set()
+    for p in paths1d:
+        try:
+            ds = open_(p, **open_kwargs)
+            datasets.append(ds)
+        except Exception as e:
+            if errors == "raise":
+                raise
+            elif errors == "warn":
+                emit_user_level_warning(f"Could not open {p} due to {e}. Ignoring.")
+            # remove invalid paths
+            invalid_paths.add(p)
+
+    if invalid_paths:
+        paths = _remove_path(paths, invalid_paths)
+        if combine == "nested":
+            # Create new ids and paths based on removed items
+            combined_ids_paths = _infer_concat_order_from_positions(paths)
+            ids = list(combined_ids_paths.keys())
+
     closers = [getattr_(ds, "_close") for ds in datasets]
     if preprocess is not None:
         datasets = [preprocess(ds) for ds in datasets]
@@ -1656,6 +1661,7 @@ def open_mfdataset(
                 ids=ids,
                 join=join,
                 combine_attrs=combine_attrs,
+                fill_value=dtypes.NA,
             )
         elif combine == "by_coords":
             # Redo ordering from coordinates, ignoring how they were ordered
@@ -1686,566 +1692,3 @@ def open_mfdataset(
         combined.attrs = datasets[paths1d.index(attrs_file)].attrs
 
     return combined
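
The new ``errors`` option added above can be exercised like this (a hedged sketch; the glob is hypothetical):

    import xarray as xr

    # Any file that cannot be opened is reported with a warning and dropped
    # before combining, instead of aborting the whole call.
    ds = xr.open_mfdataset("data/*.nc", combine="by_coords", errors="warn")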
-
-
-WRITEABLE_STORES: dict[T_NetcdfEngine, Callable] = {
-    "netcdf4": backends.NetCDF4DataStore.open,
-    "scipy": backends.ScipyDataStore,
-    "h5netcdf": backends.H5NetCDFStore.open,
-}
-
-
-# multifile=True returns writer and datastore
-@overload
-def to_netcdf(
-    dataset: Dataset,
-    path_or_file: str | os.PathLike | None = None,
-    mode: NetcdfWriteModes = "w",
-    format: T_NetcdfTypes | None = None,
-    group: str | None = None,
-    engine: T_NetcdfEngine | None = None,
-    encoding: Mapping[Hashable, Mapping[str, Any]] | None = None,
-    unlimited_dims: Iterable[Hashable] | None = None,
-    compute: bool = True,
-    *,
-    multifile: Literal[True],
-    invalid_netcdf: bool = False,
-    auto_complex: bool | None = None,
-) -> tuple[ArrayWriter, AbstractDataStore]: ...
-
-
-# path=None writes to bytes
-@overload
-def to_netcdf(
-    dataset: Dataset,
-    path_or_file: None = None,
-    mode: NetcdfWriteModes = "w",
-    format: T_NetcdfTypes | None = None,
-    group: str | None = None,
-    engine: T_NetcdfEngine | None = None,
-    encoding: Mapping[Hashable, Mapping[str, Any]] | None = None,
-    unlimited_dims: Iterable[Hashable] | None = None,
-    compute: bool = True,
-    multifile: Literal[False] = False,
-    invalid_netcdf: bool = False,
-    auto_complex: bool | None = None,
-) -> bytes: ...
-
-
-# compute=False returns dask.Delayed
-@overload
-def to_netcdf(
-    dataset: Dataset,
-    path_or_file: str | os.PathLike,
-    mode: NetcdfWriteModes = "w",
-    format: T_NetcdfTypes | None = None,
-    group: str | None = None,
-    engine: T_NetcdfEngine | None = None,
-    encoding: Mapping[Hashable, Mapping[str, Any]] | None = None,
-    unlimited_dims: Iterable[Hashable] | None = None,
-    *,
-    compute: Literal[False],
-    multifile: Literal[False] = False,
-    invalid_netcdf: bool = False,
-    auto_complex: bool | None = None,
-) -> Delayed: ...
-
-
-# default return None
-@overload
-def to_netcdf(
-    dataset: Dataset,
-    path_or_file: str | os.PathLike,
-    mode: NetcdfWriteModes = "w",
-    format: T_NetcdfTypes | None = None,
-    group: str | None = None,
-    engine: T_NetcdfEngine | None = None,
-    encoding: Mapping[Hashable, Mapping[str, Any]] | None = None,
-    unlimited_dims: Iterable[Hashable] | None = None,
-    compute: Literal[True] = True,
-    multifile: Literal[False] = False,
-    invalid_netcdf: bool = False,
-    auto_complex: bool | None = None,
-) -> None: ...
-
-
-# if compute cannot be evaluated at type check time
-# we may get back either Delayed or None
-@overload
-def to_netcdf(
-    dataset: Dataset,
-    path_or_file: str | os.PathLike,
-    mode: NetcdfWriteModes = "w",
-    format: T_NetcdfTypes | None = None,
-    group: str | None = None,
-    engine: T_NetcdfEngine | None = None,
-    encoding: Mapping[Hashable, Mapping[str, Any]] | None = None,
-    unlimited_dims: Iterable[Hashable] | None = None,
-    compute: bool = False,
-    multifile: Literal[False] = False,
-    invalid_netcdf: bool = False,
-    auto_complex: bool | None = None,
-) -> Delayed | None: ...
-
-
-# if multifile cannot be evaluated at type check time
-# we may get back either writer and datastore or Delayed or None
-@overload
-def to_netcdf(
-    dataset: Dataset,
-    path_or_file: str | os.PathLike,
-    mode: NetcdfWriteModes = "w",
-    format: T_NetcdfTypes | None = None,
-    group: str | None = None,
-    engine: T_NetcdfEngine | None = None,
-    encoding: Mapping[Hashable, Mapping[str, Any]] | None = None,
-    unlimited_dims: Iterable[Hashable] | None = None,
-    compute: bool = False,
-    multifile: bool = False,
-    invalid_netcdf: bool = False,
-    auto_complex: bool | None = None,
-) -> tuple[ArrayWriter, AbstractDataStore] | Delayed | None: ...
-
-
-# Any
-@overload
-def to_netcdf(
-    dataset: Dataset,
-    path_or_file: str | os.PathLike | None,
-    mode: NetcdfWriteModes = "w",
-    format: T_NetcdfTypes | None = None,
-    group: str | None = None,
-    engine: T_NetcdfEngine | None = None,
-    encoding: Mapping[Hashable, Mapping[str, Any]] | None = None,
-    unlimited_dims: Iterable[Hashable] | None = None,
-    compute: bool = False,
-    multifile: bool = False,
-    invalid_netcdf: bool = False,
-    auto_complex: bool | None = None,
-) -> tuple[ArrayWriter, AbstractDataStore] | bytes | Delayed | None: ...
-
-
-def to_netcdf(
-    dataset: Dataset,
-    path_or_file: str | os.PathLike | None = None,
-    mode: NetcdfWriteModes = "w",
-    format: T_NetcdfTypes | None = None,
-    group: str | None = None,
-    engine: T_NetcdfEngine | None = None,
-    encoding: Mapping[Hashable, Mapping[str, Any]] | None = None,
-    unlimited_dims: Iterable[Hashable] | None = None,
-    compute: bool = True,
-    multifile: bool = False,
-    invalid_netcdf: bool = False,
-    auto_complex: bool | None = None,
-) -> tuple[ArrayWriter, AbstractDataStore] | bytes | Delayed | None:
-    """This function creates an appropriate datastore for writing a dataset to
-    disk as a netCDF file
-
-    See `Dataset.to_netcdf` for full API docs.
-
-    The ``multifile`` argument is only for the private use of save_mfdataset.
-    """
-    if isinstance(path_or_file, os.PathLike):
-        path_or_file = os.fspath(path_or_file)
-
-    if encoding is None:
-        encoding = {}
-
-    if path_or_file is None:
-        if engine is None:
-            engine = "scipy"
-        elif engine != "scipy":
-            raise ValueError(
-                "invalid engine for creating bytes with "
-                f"to_netcdf: {engine!r}. Only the default engine "
-                "or engine='scipy' is supported"
-            )
-        if not compute:
-            raise NotImplementedError(
-                "to_netcdf() with compute=False is not yet implemented when "
-                "returning bytes"
-            )
-    elif isinstance(path_or_file, str):
-        if engine is None:
-            engine = _get_default_engine(path_or_file)
-        path_or_file = _normalize_path(path_or_file)
-    else:  # file-like object
-        engine = "scipy"
-
-    # validate Dataset keys, DataArray names, and attr keys/values
-    _validate_dataset_names(dataset)
-    _validate_attrs(dataset, engine, invalid_netcdf)
-
-    try:
-        store_open = WRITEABLE_STORES[engine]
-    except KeyError as err:
-        raise ValueError(f"unrecognized engine for to_netcdf: {engine!r}") from err
-
-    if format is not None:
-        format = format.upper()  # type: ignore[assignment]
-
-    # handle scheduler specific logic
-    scheduler = _get_scheduler()
-    have_chunks = any(v.chunks is not None for v in dataset.variables.values())
-
-    autoclose = have_chunks and scheduler in ["distributed", "multiprocessing"]
-    if autoclose and engine == "scipy":
-        raise NotImplementedError(
-            f"Writing netCDF files with the {engine} backend "
-            f"is not currently supported with dask's {scheduler} scheduler"
-        )
-
-    target = path_or_file if path_or_file is not None else BytesIO()
-    kwargs = dict(autoclose=True) if autoclose else {}
-    if invalid_netcdf:
-        if engine == "h5netcdf":
-            kwargs["invalid_netcdf"] = invalid_netcdf
-        else:
-            raise ValueError(
-                f"unrecognized option 'invalid_netcdf' for engine {engine}"
-            )
-    if auto_complex is not None:
-        kwargs["auto_complex"] = auto_complex
-
-    store = store_open(target, mode, format, group, **kwargs)
-
-    if unlimited_dims is None:
-        unlimited_dims = dataset.encoding.get("unlimited_dims", None)
-    if unlimited_dims is not None:
-        if isinstance(unlimited_dims, str) or not isinstance(unlimited_dims, Iterable):
-            unlimited_dims = [unlimited_dims]
-        else:
-            unlimited_dims = list(unlimited_dims)
-
-    writer = ArrayWriter()
-
-    # TODO: figure out how to refactor this logic (here and in save_mfdataset)
-    # to avoid this mess of conditionals
-    try:
-        # TODO: allow this work (setting up the file for writing array data)
-        # to be parallelized with dask
-        dump_to_store(
-            dataset, store, writer, encoding=encoding, unlimited_dims=unlimited_dims
-        )
-        if autoclose:
-            store.close()
-
-        if multifile:
-            return writer, store
-
-        writes = writer.sync(compute=compute)
-
-        if isinstance(target, BytesIO):
-            store.sync()
-            return target.getvalue()
-    finally:
-        if not multifile and compute:  # type: ignore[redundant-expr]
-            store.close()
-
-    if not compute:
-        import dask
-
-        return dask.delayed(_finalize_store)(writes, store)
-    return None
-
-
-def dump_to_store(
-    dataset, store, writer=None, encoder=None, encoding=None, unlimited_dims=None
-):
-    """Store dataset contents to a backends.*DataStore object."""
-    if writer is None:
-        writer = ArrayWriter()
-
-    if encoding is None:
-        encoding = {}
-
-    variables, attrs = conventions.encode_dataset_coordinates(dataset)
-
-    check_encoding = set()
-    for k, enc in encoding.items():
-        # no need to shallow copy the variable again; that already happened
-        # in encode_dataset_coordinates
-        variables[k].encoding = enc
-        check_encoding.add(k)
-
-    if encoder:
-        variables, attrs = encoder(variables, attrs)
-
-    store.store(variables, attrs, check_encoding, writer, unlimited_dims=unlimited_dims)
-
-
-def save_mfdataset(
-    datasets,
-    paths,
-    mode="w",
-    format=None,
-    groups=None,
-    engine=None,
-    compute=True,
-    **kwargs,
-):
-    """Write multiple datasets to disk as netCDF files simultaneously.
-
-    This function is intended for use with datasets consisting of dask.array
-    objects, in which case it can write the multiple datasets to disk
-    simultaneously using a shared thread pool.
-
-    When not using dask, it is no different than calling ``to_netcdf``
-    repeatedly.
-
-    Parameters
-    ----------
-    datasets : list of Dataset
-        List of datasets to save.
-    paths : list of str or list of path-like objects
-        List of paths to which to save each corresponding dataset.
-    mode : {"w", "a"}, optional
-        Write ("w") or append ("a") mode. If mode="w", any existing file at
-        these locations will be overwritten.
-    format : {"NETCDF4", "NETCDF4_CLASSIC", "NETCDF3_64BIT", \
-              "NETCDF3_CLASSIC"}, optional
-        File format for the resulting netCDF file:
-
-        * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
-          features.
-        * NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
-          netCDF 3 compatible API features.
-        * NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
-          which fully supports 2+ GB files, but is only compatible with
-          clients linked against netCDF version 3.6.0 or later.
-        * NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
-          handle 2+ GB files very well.
-
-        All formats are supported by the netCDF4-python library.
-        scipy.io.netcdf only supports the last two formats.
-
-        The default format is NETCDF4 if you are saving a file to disk and
-        have the netCDF4-python library available. Otherwise, xarray falls
-        back to using scipy to write netCDF files and defaults to the
-        NETCDF3_64BIT format (scipy does not support netCDF4).
-    groups : list of str, optional
-        Paths to the netCDF4 group in each corresponding file to which to save
-        datasets (only works for format="NETCDF4"). The groups will be created
-        if necessary.
-    engine : {"netcdf4", "scipy", "h5netcdf"}, optional
-        Engine to use when writing netCDF files. If not provided, the
-        default engine is chosen based on available dependencies, with a
-        preference for "netcdf4" if writing to a file on disk.
-        See `Dataset.to_netcdf` for additional information.
-    compute : bool
-        If true compute immediately, otherwise return a
-        ``dask.delayed.Delayed`` object that can be computed later.
-    **kwargs : dict, optional
-        Additional arguments are passed along to ``to_netcdf``.
-
-    Examples
-    --------
-    Save a dataset into one netCDF per year of data:
-
-    >>> ds = xr.Dataset(
-    ...     {"a": ("time", np.linspace(0, 1, 48))},
-    ...     coords={"time": pd.date_range("2010-01-01", freq="ME", periods=48)},
-    ... )
-    >>> ds
-    <xarray.Dataset> Size: 768B
-    Dimensions:  (time: 48)
-    Coordinates:
-      * time     (time) datetime64[ns] 384B 2010-01-31 2010-02-28 ... 2013-12-31
-    Data variables:
-        a        (time) float64 384B 0.0 0.02128 0.04255 ... 0.9574 0.9787 1.0
-    >>> years, datasets = zip(*ds.groupby("time.year"))
-    >>> paths = [f"{y}.nc" for y in years]
-    >>> xr.save_mfdataset(datasets, paths)
-    """
-    if mode == "w" and len(set(paths)) < len(paths):
-        raise ValueError(
-            "cannot use mode='w' when writing multiple datasets to the same path"
-        )
-
-    for obj in datasets:
-        if not isinstance(obj, Dataset):
-            raise TypeError(
-                "save_mfdataset only supports writing Dataset "
-                f"objects, received type {type(obj)}"
-            )
-
-    if groups is None:
-        groups = [None] * len(datasets)
-
-    if len({len(datasets), len(paths), len(groups)}) > 1:
-        raise ValueError(
-            "must supply lists of the same length for the "
-            "datasets, paths and groups arguments to "
-            "save_mfdataset"
-        )
-
-    writers, stores = zip(
-        *[
-            to_netcdf(
-                ds,
-                path,
-                mode,
-                format,
-                group,
-                engine,
-                compute=compute,
-                multifile=True,
-                **kwargs,
-            )
-            for ds, path, group in zip(datasets, paths, groups, strict=True)
-        ],
-        strict=True,
-    )
-
-    try:
-        writes = [w.sync(compute=compute) for w in writers]
-    finally:
-        if compute:
-            for store in stores:
-                store.close()
-
-    if not compute:
-        import dask
-
-        return dask.delayed(
-            [
-                dask.delayed(_finalize_store)(w, s)
-                for w, s in zip(writes, stores, strict=True)
-            ]
-        )
-
-
-# compute=True returns ZarrStore
-@overload
-def to_zarr(
-    dataset: Dataset,
-    store: ZarrStoreLike | None = None,
-    chunk_store: MutableMapping | str | os.PathLike | None = None,
-    mode: ZarrWriteModes | None = None,
-    synchronizer=None,
-    group: str | None = None,
-    encoding: Mapping | None = None,
-    *,
-    compute: Literal[True] = True,
-    consolidated: bool | None = None,
-    append_dim: Hashable | None = None,
-    region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None,
-    safe_chunks: bool = True,
-    storage_options: dict[str, str] | None = None,
-    zarr_version: int | None = None,
-    write_empty_chunks: bool | None = None,
-    chunkmanager_store_kwargs: dict[str, Any] | None = None,
-) -> backends.ZarrStore: ...
-
-
-# compute=False returns dask.Delayed
-@overload
-def to_zarr(
-    dataset: Dataset,
-    store: ZarrStoreLike | None = None,
-    chunk_store: MutableMapping | str | os.PathLike | None = None,
-    mode: ZarrWriteModes | None = None,
-    synchronizer=None,
-    group: str | None = None,
-    encoding: Mapping | None = None,
-    *,
-    compute: Literal[False],
-    consolidated: bool | None = None,
-    append_dim: Hashable | None = None,
-    region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None,
-    safe_chunks: bool = True,
-    storage_options: dict[str, str] | None = None,
-    zarr_version: int | None = None,
-    write_empty_chunks: bool | None = None,
-    chunkmanager_store_kwargs: dict[str, Any] | None = None,
-) -> Delayed: ...
-
-
-def to_zarr(
-    dataset: Dataset,
-    store: ZarrStoreLike | None = None,
-    chunk_store: MutableMapping | str | os.PathLike | None = None,
-    mode: ZarrWriteModes | None = None,
-    synchronizer=None,
-    group: str | None = None,
-    encoding: Mapping | None = None,
-    *,
-    compute: bool = True,
-    consolidated: bool | None = None,
-    append_dim: Hashable | None = None,
-    region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None,
-    safe_chunks: bool = True,
-    storage_options: dict[str, str] | None = None,
-    zarr_version: int | None = None,
-    zarr_format: int | None = None,
-    write_empty_chunks: bool | None = None,
-    chunkmanager_store_kwargs: dict[str, Any] | None = None,
-) -> backends.ZarrStore | Delayed:
-    """This function creates an appropriate datastore for writing a dataset to
-    a zarr ztore
-
-    See `Dataset.to_zarr` for full API docs.
-    """
-    from xarray.backends.zarr import _choose_default_mode, _get_mappers
-
-    # validate Dataset keys, DataArray names
-    _validate_dataset_names(dataset)
-
-    # Load empty arrays to avoid bug saving zero length dimensions (Issue #5741)
-    # TODO: delete when min dask>=2023.12.1
-    # https://github.com/dask/dask/pull/10506
-    for v in dataset.variables.values():
-        if v.size == 0:
-            v.load()
-
-    if encoding is None:
-        encoding = {}
-
-    kwargs, mapper, chunk_mapper = _get_mappers(
-        storage_options=storage_options, store=store, chunk_store=chunk_store
-    )
-    mode = _choose_default_mode(mode=mode, append_dim=append_dim, region=region)
-
-    if mode == "r+":
-        already_consolidated = consolidated
-        consolidate_on_close = False
-    else:
-        already_consolidated = False
-        consolidate_on_close = consolidated or consolidated is None
-
-    zstore = backends.ZarrStore.open_group(
-        store=mapper,
-        mode=mode,
-        synchronizer=synchronizer,
-        group=group,
-        consolidated=already_consolidated,
-        consolidate_on_close=consolidate_on_close,
-        chunk_store=chunk_mapper,
-        append_dim=append_dim,
-        write_region=region,
-        safe_chunks=safe_chunks,
-        zarr_version=zarr_version,
-        zarr_format=zarr_format,
-        write_empty=write_empty_chunks,
-        **kwargs,
-    )
-
-    dataset = zstore._validate_and_autodetect_region(dataset)
-    zstore._validate_encoding(encoding)
-
-    writer = ArrayWriter()
-    # TODO: figure out how to properly handle unlimited_dims
-    dump_to_store(dataset, zstore, writer, encoding=encoding)
-    writes = writer.sync(
-        compute=compute, chunkmanager_store_kwargs=chunkmanager_store_kwargs
-    )
-
-    if compute:
-        _finalize_store(writes, zstore)
-    else:
-        import dask
-
-        return dask.delayed(_finalize_store)(writes, zstore)
-
-    return zstore
diff -pruN 2025.03.1-8/xarray/backends/chunks.py 2025.10.1-1/xarray/backends/chunks.py
--- 2025.03.1-8/xarray/backends/chunks.py	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/xarray/backends/chunks.py	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,279 @@
+import numpy as np
+
+from xarray.core.datatree import Variable
+
+
+def align_nd_chunks(
+    nd_v_chunks: tuple[tuple[int, ...], ...],
+    nd_backend_chunks: tuple[tuple[int, ...], ...],
+) -> tuple[tuple[int, ...], ...]:
+    if len(nd_backend_chunks) != len(nd_v_chunks):
+        raise ValueError(
+            "The number of dimensions on the backend and the variable must be the same."
+        )
+
+    nd_aligned_chunks: list[tuple[int, ...]] = []
+    for backend_chunks, v_chunks in zip(nd_backend_chunks, nd_v_chunks, strict=True):
+        # Validate that they have the same number of elements
+        if sum(backend_chunks) != sum(v_chunks):
+            raise ValueError(
+                "The number of elements in the backend does not "
+                "match the number of elements in the variable. "
+                "This inconsistency should never occur at this stage."
+            )
+
+        # Validate if the backend_chunks satisfy the condition that all the values
+        # excluding the borders are equal
+        if len(set(backend_chunks[1:-1])) > 1:
+            raise ValueError(
+                f"This function currently supports aligning chunks "
+                f"only when backend chunks are of uniform size, excluding borders. "
+                f"If you encounter this error, please report it—this scenario should never occur "
+                f"unless there is an internal misuse. "
+                f"Backend chunks: {backend_chunks}"
+            )
+
+        # The algorithm assumes that there are always two borders on the
+        # backend and the array; if not, the result is going to be the same
+        # as the input, and there is nothing to optimize
+        if len(backend_chunks) == 1:
+            nd_aligned_chunks.append(backend_chunks)
+            continue
+
+        if len(v_chunks) == 1:
+            nd_aligned_chunks.append(v_chunks)
+            continue
+
+        # Size of the chunk on the backend
+        fixed_chunk = max(backend_chunks)
+
+        # The ideal size of the chunks is the maximum of the two; this avoids
+        # using more memory than expected
+        max_chunk = max(fixed_chunk, *v_chunks)
+
+        # The algorithm assumes that the chunks on this array are aligned except the last one
+        # because it can be considered a partial one
+        aligned_chunks: list[int] = []
+
+        # For simplicity of the algorithm, let's transform the Array chunks in such a way that
+        # we remove the partial chunks. To achieve this, we add artificial data to the borders
+        t_v_chunks = list(v_chunks)
+        t_v_chunks[0] += fixed_chunk - backend_chunks[0]
+        t_v_chunks[-1] += fixed_chunk - backend_chunks[-1]
+
+        # The unfilled_size is the amount of space that has not been filled on the last
+        # processed chunk; this is equivalent to the amount of data that would need to be
+        # added to a partial Zarr chunk to fill it up to the fixed_chunk size
+        unfilled_size = 0
+
+        for v_chunk in t_v_chunks:
+            # Ideally, we should try to preserve the original Dask chunks, but this is only
+            # possible if the last processed chunk was aligned (unfilled_size == 0)
+            ideal_chunk = v_chunk
+            if unfilled_size:
+                # If that scenario is not possible, the best option is to merge the chunks
+                ideal_chunk = v_chunk + aligned_chunks[-1]
+
+            while ideal_chunk:
+                if not unfilled_size:
+                    # If the previous chunk is filled, let's add a new chunk
+                    # of size 0 that will be used on the merging step to simplify the algorithm
+                    aligned_chunks.append(0)
+
+                if ideal_chunk > max_chunk:
+                    # If the ideal_chunk is bigger than the max_chunk,
+                    # we need to increase the last chunk as much as possible
+                    # but keeping it aligned, and then add a new chunk
+                    max_increase = max_chunk - aligned_chunks[-1]
+                    max_increase = (
+                        max_increase - (max_increase - unfilled_size) % fixed_chunk
+                    )
+                    aligned_chunks[-1] += max_increase
+                else:
+                    # Perfect scenario where the chunks can be merged without any split.
+                    aligned_chunks[-1] = ideal_chunk
+
+                ideal_chunk -= aligned_chunks[-1]
+                unfilled_size = (
+                    fixed_chunk - aligned_chunks[-1] % fixed_chunk
+                ) % fixed_chunk
+
+        # Now we have to remove the artificial data added to the borders
+        for order in [-1, 1]:
+            border_size = fixed_chunk - backend_chunks[::order][0]
+            aligned_chunks = aligned_chunks[::order]
+            aligned_chunks[0] -= border_size
+            t_v_chunks = t_v_chunks[::order]
+            t_v_chunks[0] -= border_size
+            if (
+                len(aligned_chunks) >= 2
+                and aligned_chunks[0] + aligned_chunks[1] <= max_chunk
+                and aligned_chunks[0] != t_v_chunks[0]
+            ):
+                # The artificial data added to the border can introduce inefficient chunks
+                # on the borders, for that reason, we will check if we can merge them or not
+                # Example:
+                # backend_chunks = [6, 6, 1]
+                # v_chunks = [6, 7]
+                # t_v_chunks = [6, 12]
+                # The ideal output should preserve the same v_chunks, but the previous loop
+                # is going to produce aligned_chunks = [6, 6, 6]
+                # And after removing the artificial data, we will end up with aligned_chunks = [6, 6, 1]
+                # which is not ideal and can be merged into a single chunk
+                aligned_chunks[1] += aligned_chunks[0]
+                aligned_chunks = aligned_chunks[1:]
+
+            t_v_chunks = t_v_chunks[::order]
+            aligned_chunks = aligned_chunks[::order]
+
+        nd_aligned_chunks.append(tuple(aligned_chunks))
+
+    return tuple(nd_aligned_chunks)
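
A minimal sketch of the alignment behaviour, reusing the example from the comments above (inputs are per-dimension tuples of chunk sizes):

    # Backend grid chunks of (6, 6, 1) and variable chunks of (6, 7) cover the
    # same 13 elements; per the border-merging comment above, the ideal output
    # preserves the variable chunks.
    align_nd_chunks(nd_v_chunks=((6, 7),), nd_backend_chunks=((6, 6, 1),))
    # -> ((6, 7),)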
+
+
+def build_grid_chunks(
+    size: int,
+    chunk_size: int,
+    region: slice | None = None,
+) -> tuple[int, ...]:
+    if region is None:
+        region = slice(0, size)
+
+    region_start = region.start or 0
+    # Generate the zarr chunks inside the region of this dim
+    chunks_on_region = [chunk_size - (region_start % chunk_size)]
+    if chunks_on_region[0] >= size:
+        # This is useful for scenarios where the chunk_size is bigger
+        # than the variable chunks, which can happen when the user specifies
+        # the enc_chunks manually.
+        return (size,)
+    chunks_on_region.extend([chunk_size] * ((size - chunks_on_region[0]) // chunk_size))
+    if (size - chunks_on_region[0]) % chunk_size != 0:
+        chunks_on_region.append((size - chunks_on_region[0]) % chunk_size)
+    return tuple(chunks_on_region)
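
A short sketch of the grid construction (``size`` is the length of the data being written inside ``region``):

    # With a backend chunk size of 4 and a region starting at offset 2, the
    # first chunk only fills the remainder of the partial backend chunk.
    build_grid_chunks(size=10, chunk_size=4, region=slice(2, 12))
    # -> (2, 4, 4)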
+
+
+def grid_rechunk(
+    v: Variable,
+    enc_chunks: tuple[int, ...],
+    region: tuple[slice, ...],
+) -> Variable:
+    nd_v_chunks = v.chunks
+    if not nd_v_chunks:
+        return v
+
+    nd_grid_chunks = tuple(
+        build_grid_chunks(
+            v_size,
+            region=interval,
+            chunk_size=chunk_size,
+        )
+        for v_size, chunk_size, interval in zip(
+            v.shape, enc_chunks, region, strict=True
+        )
+    )
+
+    nd_aligned_chunks = align_nd_chunks(
+        nd_v_chunks=nd_v_chunks,
+        nd_backend_chunks=nd_grid_chunks,
+    )
+    v = v.chunk(dict(zip(v.dims, nd_aligned_chunks, strict=True)))
+    return v
+
+
+def validate_grid_chunks_alignment(
+    nd_v_chunks: tuple[tuple[int, ...], ...] | None,
+    enc_chunks: tuple[int, ...],
+    backend_shape: tuple[int, ...],
+    region: tuple[slice, ...],
+    allow_partial_chunks: bool,
+    name: str,
+):
+    if nd_v_chunks is None:
+        return
+    base_error = (
+        "Specified Zarr chunks encoding['chunks']={enc_chunks!r} for "
+        "variable named {name!r} would overlap multiple Dask chunks. "
+        "Please check the Dask chunks at position {v_chunk_pos} and "
+        "{v_chunk_pos_next}, on axis {axis}, they are overlapped "
+        "on the same Zarr chunk in the region {region}. "
+        "Writing this array in parallel with Dask could lead to corrupted data. "
+        "To resolve this issue, consider one of the following options: "
+        "- Rechunk the array using `chunk()`. "
+        "- Modify or delete `encoding['chunks']`. "
+        "- Set `safe_chunks=False`. "
+        "- Enable automatic chunks alignment with `align_chunks=True`."
+    )
+
+    for axis, chunk_size, v_chunks, interval, size in zip(
+        range(len(enc_chunks)),
+        enc_chunks,
+        nd_v_chunks,
+        region,
+        backend_shape,
+        strict=True,
+    ):
+        for i, chunk in enumerate(v_chunks[1:-1]):
+            if chunk % chunk_size:
+                raise ValueError(
+                    base_error.format(
+                        v_chunk_pos=i + 1,
+                        v_chunk_pos_next=i + 2,
+                        v_chunk_size=chunk,
+                        axis=axis,
+                        name=name,
+                        chunk_size=chunk_size,
+                        region=interval,
+                        enc_chunks=enc_chunks,
+                    )
+                )
+
+        interval_start = interval.start or 0
+
+        if len(v_chunks) > 1:
+            # The first border size is the amount of data that needs to be updated on the
+            # first chunk taking into account the region slice.
+            first_border_size = chunk_size
+            if allow_partial_chunks:
+                first_border_size = chunk_size - interval_start % chunk_size
+
+            if (v_chunks[0] - first_border_size) % chunk_size:
+                raise ValueError(
+                    base_error.format(
+                        v_chunk_pos=0,
+                        v_chunk_pos_next=0,
+                        v_chunk_size=v_chunks[0],
+                        axis=axis,
+                        name=name,
+                        chunk_size=chunk_size,
+                        region=interval,
+                        enc_chunks=enc_chunks,
+                    )
+                )
+
+        if not allow_partial_chunks:
+            region_stop = interval.stop or size
+
+            error_on_last_chunk = base_error.format(
+                v_chunk_pos=len(v_chunks) - 1,
+                v_chunk_pos_next=len(v_chunks) - 1,
+                v_chunk_size=v_chunks[-1],
+                axis=axis,
+                name=name,
+                chunk_size=chunk_size,
+                region=interval,
+                enc_chunks=enc_chunks,
+            )
+            if interval_start % chunk_size:
+                # The last chunk, which can also be the only one, is a partial
+                # chunk if it is not aligned at the beginning
+                raise ValueError(error_on_last_chunk)
+
+            if np.ceil(region_stop / chunk_size) == np.ceil(size / chunk_size):
+                # If the region covers the last chunk, check whether
+                # the remainder with the default chunk size
+                # is equal to the size of the last chunk
+                if v_chunks[-1] % chunk_size != size % chunk_size:
+                    raise ValueError(error_on_last_chunk)
+            elif v_chunks[-1] % chunk_size:
+                raise ValueError(error_on_last_chunk)
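
And a sketch of the validation rejecting a write that would straddle backend chunks (values are illustrative):

    # A dask chunk of 5 elements overlaps two Zarr chunks of size 4, so writing
    # it in parallel would be unsafe and a ValueError is raised.
    validate_grid_chunks_alignment(
        nd_v_chunks=((5, 5),),
        enc_chunks=(4,),
        backend_shape=(10,),
        region=(slice(0, 10),),
        allow_partial_chunks=True,
        name="x",
    )  # raises ValueError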
diff -pruN 2025.03.1-8/xarray/backends/common.py 2025.10.1-1/xarray/backends/common.py
--- 2025.03.1-8/xarray/backends/common.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/backends/common.py	2025-10-10 10:38:05.000000000 +0000
@@ -4,9 +4,18 @@ import logging
 import os
 import time
 import traceback
-from collections.abc import Hashable, Iterable, Mapping, Sequence
+from collections.abc import Callable, Hashable, Iterable, Mapping, Sequence
+from dataclasses import dataclass
 from glob import glob
-from typing import TYPE_CHECKING, Any, ClassVar, TypeVar, Union, overload
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    ClassVar,
+    Self,
+    TypeVar,
+    Union,
+    overload,
+)
 
 import numpy as np
 import pandas as pd
@@ -44,14 +53,18 @@ T = TypeVar("T")
 
 
 @overload
-def _normalize_path(path: str | os.PathLike) -> str: ...
+def _normalize_path(path: os.PathLike) -> str: ...
+
+
+@overload
+def _normalize_path(path: str) -> str: ...
 
 
 @overload
 def _normalize_path(path: T) -> T: ...
 
 
-def _normalize_path(path: str | os.PathLike | T) -> str | T:
+def _normalize_path(path: os.PathLike | str | T) -> str | T:
     """
     Normalize pathlikes to string.
 
@@ -76,7 +89,7 @@ def _normalize_path(path: str | os.PathL
     if isinstance(path, str) and not is_remote_uri(path):
         path = os.path.abspath(os.path.expanduser(path))
 
-    return path  # type:ignore [return-value]
+    return path  # type: ignore[return-value]
 
 
 @overload
@@ -188,6 +201,19 @@ def _find_absolute_paths(
     return _normalize_path_list(paths)
 
 
+@dataclass
+class BytesIOProxy:
+    """Proxy object for a write that returns a memoryview."""
+
+    getvalue: Callable[[], memoryview] | None = None
+
+    def getbuffer(self) -> memoryview:
+        """Get the value of this write as bytes or memory."""
+        if self.getvalue is None:
+            raise ValueError("must set getvalue before fetching value")
+        return self.getvalue()
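
A small sketch of how the proxy is meant to be used (the callable assigned to ``getvalue`` here is illustrative; in practice a writing backend wires it up once the write has completed):

    proxy = BytesIOProxy()
    proxy.getvalue = lambda: memoryview(b"...serialized bytes...")
    buffer = proxy.getbuffer()  # returns the memoryview from getvalue()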
+
+
 def _open_remote_file(file, mode, storage_options=None):
     import fsspec
 
@@ -229,6 +255,20 @@ def find_root_and_group(ds):
     return ds, group
 
 
+def collect_ancestor_dimensions(group) -> dict[str, int]:
+    """Returns dimensions defined in parent groups.
+
+    If dimensions are defined in multiple ancestors, use the size of the closest
+    ancestor.
+    """
+    dims = {}
+    while (group := group.parent) is not None:
+        for k, v in group.dimensions.items():
+            if k not in dims:
+                dims[k] = len(v)
+    return dims
+
+
 def datatree_from_dict_with_io_cleanup(groups_dict: Mapping[str, Dataset]) -> DataTree:
     """DataTree.from_dict with file clean-up."""
     try:
@@ -270,17 +310,31 @@ def robust_getitem(array, key, catch=Exc
 class BackendArray(NdimSizeLenMixin, indexing.ExplicitlyIndexed):
     __slots__ = ()
 
-    def get_duck_array(self, dtype: np.typing.DTypeLike = None):
+    async def async_getitem(self, key: indexing.ExplicitIndexer) -> np.typing.ArrayLike:
+        raise NotImplementedError("Backend does not support asynchronous loading")
+
+    def get_duck_array(self, dtype: np.typing.DTypeLike | None = None):
         key = indexing.BasicIndexer((slice(None),) * self.ndim)
         return self[key]  # type: ignore[index]
 
+    async def async_get_duck_array(self, dtype: np.typing.DTypeLike | None = None):
+        key = indexing.BasicIndexer((slice(None),) * self.ndim)
+        return await self.async_getitem(key)
+
 
 class AbstractDataStore:
     __slots__ = ()
 
+    def get_child_store(self, group: str) -> Self:  # pragma: no cover
+        """Get a store corresponding to the indicated child group."""
+        raise NotImplementedError()
+
     def get_dimensions(self):  # pragma: no cover
         raise NotImplementedError()
 
+    def get_parent_dimensions(self):  # pragma: no cover
+        return {}
+
     def get_attrs(self):  # pragma: no cover
         raise NotImplementedError()
 
@@ -324,6 +378,11 @@ class AbstractDataStore:
         self.close()
 
 
+T_PathFileOrDataStore = (
+    str | os.PathLike[Any] | ReadBuffer | bytes | memoryview | AbstractDataStore
+)
+
+
 class ArrayWriter:
     __slots__ = ("lock", "regions", "sources", "targets")
 
@@ -338,11 +397,10 @@ class ArrayWriter:
             self.sources.append(source)
             self.targets.append(target)
             self.regions.append(region)
+        elif region:
+            target[region] = source
         else:
-            if region:
-                target[region] = source
-            else:
-                target[...] = source
+            target[...] = source
 
     def sync(self, compute=True, chunkmanager_store_kwargs=None):
         if self.sources:
@@ -390,11 +448,25 @@ class AbstractWritableDataStore(Abstract
         attributes : dict-like
 
         """
-        variables = {k: self.encode_variable(v) for k, v in variables.items()}
-        attributes = {k: self.encode_attribute(v) for k, v in attributes.items()}
-        return variables, attributes
+        encoded_variables = {}
+        for k, v in variables.items():
+            try:
+                encoded_variables[k] = self.encode_variable(v)
+            except Exception as e:
+                e.add_note(f"Raised while encoding variable {k!r} with value {v!r}")
+                raise
+
+        encoded_attributes = {}
+        for k, v in attributes.items():
+            try:
+                encoded_attributes[k] = self.encode_attribute(v)
+            except Exception as e:
+                e.add_note(f"Raised while encoding attribute {k!r} with value {v!r}")
+                raise
+
+        return encoded_variables, encoded_attributes
 
-    def encode_variable(self, v):
+    def encode_variable(self, v, name=None):
         """encode one variable"""
         return v
 
@@ -402,7 +474,10 @@ class AbstractWritableDataStore(Abstract
         """encode one attribute"""
         return a
 
-    def set_dimension(self, dim, length):  # pragma: no cover
+    def prepare_variable(self, name, variable, check_encoding, unlimited_dims):
+        raise NotImplementedError()
+
+    def set_dimension(self, dim, length, is_unlimited):  # pragma: no cover
         raise NotImplementedError()
 
     def set_attribute(self, k, v):  # pragma: no cover
@@ -515,13 +590,14 @@ class AbstractWritableDataStore(Abstract
         if unlimited_dims is None:
             unlimited_dims = set()
 
+        parent_dims = self.get_parent_dimensions()
         existing_dims = self.get_dimensions()
 
         dims = {}
         for v in unlimited_dims:  # put unlimited_dims first
             dims[v] = None
         for v in variables.values():
-            dims.update(dict(zip(v.dims, v.shape, strict=True)))
+            dims |= v.sizes
 
         for dim, length in dims.items():
             if dim in existing_dims and length != existing_dims[dim]:
@@ -529,10 +605,14 @@ class AbstractWritableDataStore(Abstract
                     "Unable to update size for existing dimension"
                     f"{dim!r} ({length} != {existing_dims[dim]})"
                 )
-            elif dim not in existing_dims:
+            elif dim not in existing_dims and length != parent_dims.get(dim):
                 is_unlimited = dim in unlimited_dims
                 self.set_dimension(dim, length, is_unlimited)
 
+    def sync(self):
+        """Write all buffered data to disk."""
+        raise NotImplementedError()
+
 
 def _infer_dtype(array, name=None):
     """Given an object array with no missing values, infer its dtype from all elements."""
@@ -544,11 +624,10 @@ def _infer_dtype(array, name=None):
 
     native_dtypes = set(np.vectorize(type, otypes=[object])(array.ravel()))
     if len(native_dtypes) > 1 and native_dtypes != {bytes, str}:
+        native_dtype_names = ", ".join(x.__name__ for x in native_dtypes)
         raise ValueError(
-            "unable to infer dtype on variable {!r}; object array "
-            "contains mixed native types: {}".format(
-                name, ", ".join(x.__name__ for x in native_dtypes)
-            )
+            f"unable to infer dtype on variable {name!r}; object array "
+            f"contains mixed native types: {native_dtype_names}"
         )
 
     element = array[(0,) * array.ndim]
@@ -569,7 +648,7 @@ def _infer_dtype(array, name=None):
     )
 
 
-def _copy_with_dtype(data, dtype: np.typing.DTypeLike):
+def _copy_with_dtype(data, dtype: np.typing.DTypeLike | None):
     """Create a copy of an array with the given dtype.
 
     We use this instead of np.array() to ensure that custom object dtypes end
@@ -639,9 +718,7 @@ class WritableCFDataStore(AbstractWritab
         variables = {
             k: ensure_dtype_not_object(v, name=k) for k, v in variables.items()
         }
-        variables = {k: self.encode_variable(v) for k, v in variables.items()}
-        attributes = {k: self.encode_attribute(v) for k, v in attributes.items()}
-        return variables, attributes
+        return super().encode(variables, attributes)
 
 
 class BackendEntrypoint:
@@ -676,11 +753,15 @@ class BackendEntrypoint:
     url : str, default: ""
         A string with the URL to the backend's documentation.
         The setting of this attribute is not mandatory.
+    supports_groups : bool, default: False
+        Whether the backend supports opening groups (via open_datatree and
+        open_groups_as_dict) or not.
     """
 
     open_dataset_parameters: ClassVar[tuple | None] = None
     description: ClassVar[str] = ""
     url: ClassVar[str] = ""
+    supports_groups: ClassVar[bool] = False
 
     def __repr__(self) -> str:
         txt = f"<{type(self).__name__}>"
@@ -692,7 +773,12 @@ class BackendEntrypoint:
 
     def open_dataset(
         self,
-        filename_or_obj: str | os.PathLike[Any] | ReadBuffer | AbstractDataStore,
+        filename_or_obj: str
+        | os.PathLike[Any]
+        | ReadBuffer
+        | bytes
+        | memoryview
+        | AbstractDataStore,
         *,
         drop_variables: str | Iterable[str] | None = None,
     ) -> Dataset:
@@ -704,7 +790,12 @@ class BackendEntrypoint:
 
     def guess_can_open(
         self,
-        filename_or_obj: str | os.PathLike[Any] | ReadBuffer | AbstractDataStore,
+        filename_or_obj: str
+        | os.PathLike[Any]
+        | ReadBuffer
+        | bytes
+        | memoryview
+        | AbstractDataStore,
     ) -> bool:
         """
         Backend open_dataset method used by Xarray in :py:func:`~xarray.open_dataset`.
@@ -714,19 +805,31 @@ class BackendEntrypoint:
 
     def open_datatree(
         self,
-        filename_or_obj: str | os.PathLike[Any] | ReadBuffer | AbstractDataStore,
+        filename_or_obj: str
+        | os.PathLike[Any]
+        | ReadBuffer
+        | bytes
+        | memoryview
+        | AbstractDataStore,
         *,
         drop_variables: str | Iterable[str] | None = None,
     ) -> DataTree:
         """
         Backend open_datatree method used by Xarray in :py:func:`~xarray.open_datatree`.
+
+        If implemented, set the class variable supports_groups to True.
         """
 
         raise NotImplementedError()
 
     def open_groups_as_dict(
         self,
-        filename_or_obj: str | os.PathLike[Any] | ReadBuffer | AbstractDataStore,
+        filename_or_obj: str
+        | os.PathLike[Any]
+        | ReadBuffer
+        | bytes
+        | memoryview
+        | AbstractDataStore,
         *,
         drop_variables: str | Iterable[str] | None = None,
     ) -> dict[str, Dataset]:
@@ -737,6 +840,8 @@ class BackendEntrypoint:
         This function exists to provide a universal way to open all groups in a file,
         before applying any additional consistency checks or requirements necessary
         to create a `DataTree` object (typically done using :py:meth:`~xarray.DataTree.from_dict`).
+
+        If implemented, set the class variable supports_groups to True.
         """
 
         raise NotImplementedError()
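
To illustrate the new ``supports_groups`` flag documented above, a minimal, purely hypothetical backend subclass might look like this:

    from xarray.backends import BackendEntrypoint

    class MyGroupedBackendEntrypoint(BackendEntrypoint):
        # Hypothetical example backend; real backends implement these methods.
        description = "Example backend that can open groups"
        supports_groups = True  # considered when guessing engines for groups

        def open_dataset(self, filename_or_obj, *, drop_variables=None):
            raise NotImplementedError

        def open_groups_as_dict(self, filename_or_obj, *, drop_variables=None):
            raise NotImplementedError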
diff -pruN 2025.03.1-8/xarray/backends/file_manager.py 2025.10.1-1/xarray/backends/file_manager.py
--- 2025.03.1-8/xarray/backends/file_manager.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/backends/file_manager.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,31 +1,33 @@
 from __future__ import annotations
 
 import atexit
-import contextlib
-import io
 import threading
 import uuid
 import warnings
-from collections.abc import Hashable
-from typing import Any
+from collections.abc import Callable, Hashable, Iterator, Mapping, MutableMapping
+from contextlib import AbstractContextManager, contextmanager
+from typing import Any, Generic, Literal, TypeVar, cast
 
 from xarray.backends.locks import acquire
 from xarray.backends.lru_cache import LRUCache
 from xarray.core import utils
 from xarray.core.options import OPTIONS
+from xarray.core.types import Closable, Lock
 
 # Global cache for storing open files.
-FILE_CACHE: LRUCache[Any, io.IOBase] = LRUCache(
+FILE_CACHE: LRUCache[Any, Closable] = LRUCache(
     maxsize=OPTIONS["file_cache_maxsize"], on_evict=lambda k, v: v.close()
 )
 assert FILE_CACHE.maxsize, "file cache must be at least size one"
 
+T_File = TypeVar("T_File", bound=Closable)
+
 REF_COUNTS: dict[Any, int] = {}
 
-_DEFAULT_MODE = utils.ReprObject("<unused>")
+_OMIT_MODE = utils.ReprObject("<omitted>")
 
 
-class FileManager:
+class FileManager(Generic[T_File]):
     """Manager for acquiring and closing a file object.
 
     Use FileManager subclasses (CachingFileManager in particular) on backend
@@ -33,11 +35,13 @@ class FileManager:
     many open files and transferring them between multiple processes.
     """
 
-    def acquire(self, needs_lock=True):
+    def acquire(self, needs_lock: bool = True) -> T_File:
         """Acquire the file object from this manager."""
         raise NotImplementedError()
 
-    def acquire_context(self, needs_lock=True):
+    def acquire_context(
+        self, needs_lock: bool = True
+    ) -> AbstractContextManager[T_File]:
         """Context manager for acquiring a file. Yields a file object.
 
         The context manager unwinds any actions taken as part of acquisition
@@ -46,12 +50,12 @@ class FileManager:
         """
         raise NotImplementedError()
 
-    def close(self, needs_lock=True):
+    def close(self, needs_lock: bool = True) -> None:
         """Close the file object associated with this manager, if needed."""
         raise NotImplementedError()
 
 
-class CachingFileManager(FileManager):
+class CachingFileManager(FileManager[T_File]):
     """Wrapper for automatically opening and closing file objects.
 
     Unlike files, CachingFileManager objects can be safely pickled and passed
@@ -81,14 +85,14 @@ class CachingFileManager(FileManager):
 
     def __init__(
         self,
-        opener,
-        *args,
-        mode=_DEFAULT_MODE,
-        kwargs=None,
-        lock=None,
-        cache=None,
+        opener: Callable[..., T_File],
+        *args: Any,
+        mode: Any = _OMIT_MODE,
+        kwargs: Mapping[str, Any] | None = None,
+        lock: Lock | None | Literal[False] = None,
+        cache: MutableMapping[Any, T_File] | None = None,
         manager_id: Hashable | None = None,
-        ref_counts=None,
+        ref_counts: dict[Any, int] | None = None,
     ):
         """Initialize a CachingFileManager.
 
@@ -134,13 +138,17 @@ class CachingFileManager(FileManager):
         self._mode = mode
         self._kwargs = {} if kwargs is None else dict(kwargs)
 
-        self._use_default_lock = lock is None or lock is False
-        self._lock = threading.Lock() if self._use_default_lock else lock
+        if lock is None or lock is False:
+            self._use_default_lock = True
+            self._lock: Lock = threading.Lock()
+        else:
+            self._use_default_lock = False
+            self._lock = lock
 
         # cache[self._key] stores the file associated with this object.
         if cache is None:
-            cache = FILE_CACHE
-        self._cache = cache
+            cache = cast(MutableMapping[Any, T_File], FILE_CACHE)
+        self._cache: MutableMapping[Any, T_File] = cache
         if manager_id is None:
             # Each call to CachingFileManager should separately open files.
             manager_id = str(uuid.uuid4())
@@ -155,7 +163,7 @@ class CachingFileManager(FileManager):
         self._ref_counter = _RefCounter(ref_counts)
         self._ref_counter.increment(self._key)
 
-    def _make_key(self):
+    def _make_key(self) -> _HashedSequence:
         """Make a key for caching files in the LRU cache."""
         value = (
             self._opener,
@@ -166,8 +174,8 @@ class CachingFileManager(FileManager):
         )
         return _HashedSequence(value)
 
-    @contextlib.contextmanager
-    def _optional_lock(self, needs_lock):
+    @contextmanager
+    def _optional_lock(self, needs_lock: bool):
         """Context manager for optionally acquiring a lock."""
         if needs_lock:
             with self._lock:
@@ -175,7 +183,7 @@ class CachingFileManager(FileManager):
         else:
             yield
 
-    def acquire(self, needs_lock=True):
+    def acquire(self, needs_lock: bool = True) -> T_File:
         """Acquire a file object from the manager.
 
         A new file is only opened if it has expired from the
@@ -193,8 +201,8 @@ class CachingFileManager(FileManager):
         file, _ = self._acquire_with_cache_info(needs_lock)
         return file
 
-    @contextlib.contextmanager
-    def acquire_context(self, needs_lock=True):
+    @contextmanager
+    def acquire_context(self, needs_lock: bool = True) -> Iterator[T_File]:
         """Context manager for acquiring a file."""
         file, cached = self._acquire_with_cache_info(needs_lock)
         try:
@@ -204,14 +212,14 @@ class CachingFileManager(FileManager):
                 self.close(needs_lock)
             raise
 
-    def _acquire_with_cache_info(self, needs_lock=True):
+    def _acquire_with_cache_info(self, needs_lock: bool = True) -> tuple[T_File, bool]:
         """Acquire a file, returning the file and whether it was cached."""
         with self._optional_lock(needs_lock):
             try:
                 file = self._cache[self._key]
             except KeyError:
                 kwargs = self._kwargs
-                if self._mode is not _DEFAULT_MODE:
+                if self._mode is not _OMIT_MODE:
                     kwargs = kwargs.copy()
                     kwargs["mode"] = self._mode
                 file = self._opener(*self._args, **kwargs)
@@ -223,7 +231,7 @@ class CachingFileManager(FileManager):
             else:
                 return file, True
 
-    def close(self, needs_lock=True):
+    def close(self, needs_lock: bool = True) -> None:
         """Explicitly close any associated file object (if necessary)."""
         # TODO: remove needs_lock if/when we have a reentrant lock in
         # dask.distributed: https://github.com/dask/dask/issues/3832
@@ -282,7 +290,7 @@ class CachingFileManager(FileManager):
 
     def __repr__(self) -> str:
         args_string = ", ".join(map(repr, self._args))
-        if self._mode is not _DEFAULT_MODE:
+        if self._mode is not _OMIT_MODE:
             args_string += f", mode={self._mode!r}"
         return (
             f"{type(self).__name__}({self._opener!r}, {args_string}, "
@@ -290,13 +298,6 @@ class CachingFileManager(FileManager):
         )
 
 
-@atexit.register
-def _remove_del_method():
-    # We don't need to close unclosed files at program exit, and may not be able
-    # to, because Python is cleaning up imports / globals.
-    del CachingFileManager.__del__
-
-
 class _RefCounter:
     """Class for keeping track of reference counts."""
 
@@ -332,25 +333,136 @@ class _HashedSequence(list):
         self[:] = tuple_value
         self.hashvalue = hash(tuple_value)
 
-    def __hash__(self):
+    def __hash__(self) -> int:  # type: ignore[override]
         return self.hashvalue
 
 
-class DummyFileManager(FileManager):
+def _get_none() -> None:
+    return None
+
+
+class PickleableFileManager(FileManager[T_File]):
+    """File manager that supports pickling by reopening a file object.
+
+    Use PickleableFileManager for wrapping file-like objects that do not natively
+    support pickling (e.g., netCDF4.Dataset and h5netcdf.File) in cases where a
+    global cache is not desirable (e.g., for netCDF files opened from bytes in
+    memory, or from existing file objects).
+    """
+
+    def __init__(
+        self,
+        opener: Callable[..., T_File],
+        *args: Any,
+        mode: Any = _OMIT_MODE,
+        kwargs: Mapping[str, Any] | None = None,
+    ):
+        kwargs = {} if kwargs is None else dict(kwargs)
+        self._opener = opener
+        self._args = args
+        self._mode = "a" if mode == "w" else mode
+        self._kwargs = kwargs
+
+        # Note: No need for locking with PickleableFileManager, because all
+        # opening of files happens in the constructor.
+        if mode != _OMIT_MODE:
+            kwargs = kwargs | {"mode": mode}
+        self._file: T_File | None = opener(*args, **kwargs)
+
+    @property
+    def _closed(self) -> bool:
+        # If opener() raised an error in the constructor, _file may not be set
+        return getattr(self, "_file", None) is None
+
+    def _get_unclosed_file(self) -> T_File:
+        if self._closed:
+            raise RuntimeError("file is closed")
+        file = self._file
+        assert file is not None
+        return file
+
+    def acquire(self, needs_lock: bool = True) -> T_File:
+        del needs_lock  # unused
+        return self._get_unclosed_file()
+
+    @contextmanager
+    def acquire_context(self, needs_lock: bool = True) -> Iterator[T_File]:
+        del needs_lock  # unused
+        yield self._get_unclosed_file()
+
+    def close(self, needs_lock: bool = True) -> None:
+        del needs_lock  # unused
+        if not self._closed:
+            file = self._get_unclosed_file()
+            file.close()
+            self._file = None
+            # Remove all references to opener arguments, so they can be garbage
+            # collected.
+            self._args = ()
+            self._mode = _OMIT_MODE
+            self._kwargs = {}
+
+    def __del__(self) -> None:
+        if not self._closed:
+            self.close()
+
+            if OPTIONS["warn_for_unclosed_files"]:
+                warnings.warn(
+                    f"deallocating {self}, but file is not already closed. "
+                    "This may indicate a bug.",
+                    RuntimeWarning,
+                    stacklevel=2,
+                )
+
+    def __getstate__(self):
+        # file is intentionally omitted: we want to open it again
+        opener = _get_none if self._closed else self._opener
+        return (opener, self._args, self._mode, self._kwargs)
+
+    def __setstate__(self, state) -> None:
+        opener, args, mode, kwargs = state
+        self.__init__(opener, *args, mode=mode, kwargs=kwargs)  # type: ignore[misc]
+
+    def __repr__(self) -> str:
+        if self._closed:
+            return f"<closed {type(self).__name__}>"
+        args_string = ", ".join(map(repr, self._args))
+        if self._mode is not _OMIT_MODE:
+            args_string += f", mode={self._mode!r}"
+        kwargs = (
+            self._kwargs | {"memory": utils.ReprObject("...")}
+            if "memory" in self._kwargs
+            else self._kwargs
+        )
+        return f"{type(self).__name__}({self._opener!r}, {args_string}, {kwargs=})"
+
+
+@atexit.register
+def _remove_del_methods():
+    # We don't need to close unclosed files at program exit, and may not be able
+    # to, because Python is cleaning up imports / globals.
+    del CachingFileManager.__del__
+    del PickleableFileManager.__del__
+
+
+class DummyFileManager(FileManager[T_File]):
     """FileManager that simply wraps an open file in the FileManager interface."""
 
-    def __init__(self, value):
+    def __init__(self, value: T_File, *, close: Callable[[], None] | None = None):
+        if close is None:
+            close = value.close
         self._value = value
+        self._close = close
 
-    def acquire(self, needs_lock=True):
-        del needs_lock  # ignored
+    def acquire(self, needs_lock: bool = True) -> T_File:
+        del needs_lock  # unused
         return self._value
 
-    @contextlib.contextmanager
-    def acquire_context(self, needs_lock=True):
-        del needs_lock
+    @contextmanager
+    def acquire_context(self, needs_lock: bool = True) -> Iterator[T_File]:
+        del needs_lock  # unused
         yield self._value
 
-    def close(self, needs_lock=True):
-        del needs_lock  # ignored
-        self._value.close()
+    def close(self, needs_lock: bool = True) -> None:
+        del needs_lock  # unused
+        self._close()
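
Note on the new PickleableFileManager above: it supports pickling by storing the opener and its arguments and re-opening the file on unpickling, rather than trying to serialize the open handle. A minimal sketch of that pattern, using a hypothetical ReopeningFileManager and the builtin open() (not xarray's actual class):

import pickle
import tempfile


class ReopeningFileManager:
    """Sketch: pickle the opener and its arguments, never the open handle."""

    def __init__(self, opener, *args, **kwargs):
        self._opener = opener
        self._args = args
        self._kwargs = kwargs
        self._file = opener(*args, **kwargs)

    def acquire(self):
        return self._file

    def close(self):
        self._file.close()

    def __getstate__(self):
        # the open handle is intentionally omitted; unpickling re-opens it
        return (self._opener, self._args, self._kwargs)

    def __setstate__(self, state):
        opener, args, kwargs = state
        self.__init__(opener, *args, **kwargs)


with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write("hello")
    path = f.name

manager = ReopeningFileManager(open, path, mode="r")
clone = pickle.loads(pickle.dumps(manager))  # re-opens `path` in the clone
assert clone.acquire().read() == "hello"
manager.close()
clone.close()
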
diff -pruN 2025.03.1-8/xarray/backends/h5netcdf_.py 2025.10.1-1/xarray/backends/h5netcdf_.py
--- 2025.03.1-8/xarray/backends/h5netcdf_.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/backends/h5netcdf_.py	2025-10-10 10:38:05.000000000 +0000
@@ -4,20 +4,28 @@ import functools
 import io
 import os
 from collections.abc import Iterable
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING, Any, Self
 
 import numpy as np
 
 from xarray.backends.common import (
     BACKEND_ENTRYPOINTS,
     BackendEntrypoint,
+    BytesIOProxy,
+    T_PathFileOrDataStore,
     WritableCFDataStore,
     _normalize_path,
     _open_remote_file,
+    collect_ancestor_dimensions,
     datatree_from_dict_with_io_cleanup,
     find_root_and_group,
 )
-from xarray.backends.file_manager import CachingFileManager, DummyFileManager
+from xarray.backends.file_manager import (
+    CachingFileManager,
+    DummyFileManager,
+    FileManager,
+    PickleableFileManager,
+)
 from xarray.backends.locks import HDF5_LOCK, combine_locks, ensure_lock, get_write_lock
 from xarray.backends.netCDF4_ import (
     BaseNetCDF4Array,
@@ -40,6 +48,8 @@ from xarray.core.utils import (
 from xarray.core.variable import Variable
 
 if TYPE_CHECKING:
+    import h5netcdf
+
     from xarray.backends.common import AbstractDataStore
     from xarray.core.dataset import Dataset
     from xarray.core.datatree import DataTree
@@ -68,17 +78,16 @@ def _read_attributes(h5netcdf_var):
     # bytes attributes to strings
     attrs = {}
     for k, v in h5netcdf_var.attrs.items():
-        if k not in ["_FillValue", "missing_value"]:
-            if isinstance(v, bytes):
-                try:
-                    v = v.decode("utf-8")
-                except UnicodeDecodeError:
-                    emit_user_level_warning(
-                        f"'utf-8' codec can't decode bytes for attribute "
-                        f"{k!r} of h5netcdf object {h5netcdf_var.name!r}, "
-                        f"returning bytes undecoded.",
-                        UnicodeWarning,
-                    )
+        if k not in ["_FillValue", "missing_value"] and isinstance(v, bytes):
+            try:
+                v = v.decode("utf-8")
+            except UnicodeDecodeError:
+                emit_user_level_warning(
+                    f"'utf-8' codec can't decode bytes for attribute "
+                    f"{k!r} of h5netcdf object {h5netcdf_var.name!r}, "
+                    f"returning bytes undecoded.",
+                    UnicodeWarning,
+                )
         attrs[k] = v
     return attrs
 
@@ -110,7 +119,14 @@ class H5NetCDFStore(WritableCFDataStore)
         "lock",
     )
 
-    def __init__(self, manager, group=None, mode=None, lock=HDF5_LOCK, autoclose=False):
+    def __init__(
+        self,
+        manager: FileManager | h5netcdf.File | h5netcdf.Group,
+        group=None,
+        mode=None,
+        lock=HDF5_LOCK,
+        autoclose=False,
+    ):
         import h5netcdf
 
         if isinstance(manager, h5netcdf.File | h5netcdf.Group):
@@ -135,6 +151,17 @@ class H5NetCDFStore(WritableCFDataStore)
         self.lock = ensure_lock(lock)
         self.autoclose = autoclose
 
+    def get_child_store(self, group: str) -> Self:
+        if self._group is not None:
+            group = os.path.join(self._group, group)
+        return type(self)(
+            self._manager,
+            group=group,
+            mode=self._mode,
+            lock=self.lock,
+            autoclose=self.autoclose,
+        )
+
     @classmethod
     def open(
         cls,
@@ -159,12 +186,12 @@ class H5NetCDFStore(WritableCFDataStore)
                 filename, mode=mode_, storage_options=storage_options
             )
 
-        if isinstance(filename, bytes):
-            raise ValueError(
-                "can't open netCDF4/HDF5 as bytes "
-                "try passing a path or file-like object"
-            )
-        elif isinstance(filename, io.IOBase):
+        if isinstance(filename, BytesIOProxy):
+            source = filename
+            filename = io.BytesIO()
+            source.getvalue = filename.getbuffer
+
+        if isinstance(filename, io.IOBase) and mode == "r":
             magic_number = read_magic_number_from_file(filename)
             if not magic_number.startswith(b"\211HDF\r\n\032\n"):
                 raise ValueError(
@@ -190,7 +217,12 @@ class H5NetCDFStore(WritableCFDataStore)
             else:
                 lock = combine_locks([HDF5_LOCK, get_write_lock(filename)])
 
-        manager = CachingFileManager(h5netcdf.File, filename, mode=mode, kwargs=kwargs)
+        manager_cls = (
+            CachingFileManager
+            if isinstance(filename, str) and not is_remote_uri(filename)
+            else PickleableFileManager
+        )
+        manager = manager_cls(h5netcdf.File, filename, mode=mode, kwargs=kwargs)
         return cls(manager, group=group, mode=mode, lock=lock, autoclose=autoclose)
 
     def _acquire(self, needs_lock=True):
@@ -269,6 +301,9 @@ class H5NetCDFStore(WritableCFDataStore)
     def get_dimensions(self):
         return FrozenDict((k, len(v)) for k, v in self.ds.dimensions.items())
 
+    def get_parent_dimensions(self):
+        return FrozenDict(collect_ancestor_dimensions(self.ds))
+
     def get_encoding(self):
         return {
             "unlimited_dims": {
@@ -287,8 +322,8 @@ class H5NetCDFStore(WritableCFDataStore)
     def set_attribute(self, key, value):
         self.ds.attrs[key] = value
 
-    def encode_variable(self, variable):
-        return _encode_nc4_variable(variable)
+    def encode_variable(self, variable, name=None):
+        return _encode_nc4_variable(variable, name=name)
 
     def prepare_variable(
         self, name, variable, check_encoding=False, unlimited_dims=None
@@ -389,6 +424,15 @@ def _emit_phony_dims_warning():
     )
 
 
+def _normalize_filename_or_obj(
+    filename_or_obj: T_PathFileOrDataStore,
+) -> str | ReadBuffer | AbstractDataStore:
+    if isinstance(filename_or_obj, bytes | memoryview):
+        return io.BytesIO(filename_or_obj)
+    else:
+        return _normalize_path(filename_or_obj)
+
+
 class H5netcdfBackendEntrypoint(BackendEntrypoint):
     """
     Backend for netCDF files based on the h5netcdf package.
@@ -415,11 +459,10 @@ class H5netcdfBackendEntrypoint(BackendE
         "Open netCDF (.nc, .nc4 and .cdf) and most HDF5 files using h5netcdf in Xarray"
     )
     url = "https://docs.xarray.dev/en/stable/generated/xarray.backends.H5netcdfBackendEntrypoint.html"
+    supports_groups = True
 
-    def guess_can_open(
-        self,
-        filename_or_obj: str | os.PathLike[Any] | ReadBuffer | AbstractDataStore,
-    ) -> bool:
+    def guess_can_open(self, filename_or_obj: T_PathFileOrDataStore) -> bool:
+        filename_or_obj = _normalize_filename_or_obj(filename_or_obj)
         magic_number = try_read_magic_number_from_file_or_path(filename_or_obj)
         if magic_number is not None:
             return magic_number.startswith(b"\211HDF\r\n\032\n")
@@ -432,7 +475,7 @@ class H5netcdfBackendEntrypoint(BackendE
 
     def open_dataset(
         self,
-        filename_or_obj: str | os.PathLike[Any] | ReadBuffer | AbstractDataStore,
+        filename_or_obj: T_PathFileOrDataStore,
         *,
         mask_and_scale=True,
         decode_times=True,
@@ -455,7 +498,7 @@ class H5netcdfBackendEntrypoint(BackendE
         # remove and set phony_dims="access" above
         emit_phony_dims_warning, phony_dims = _check_phony_dims(phony_dims)
 
-        filename_or_obj = _normalize_path(filename_or_obj)
+        filename_or_obj = _normalize_filename_or_obj(filename_or_obj)
         store = H5NetCDFStore.open(
             filename_or_obj,
             format=format,
@@ -492,7 +535,7 @@ class H5netcdfBackendEntrypoint(BackendE
 
     def open_datatree(
         self,
-        filename_or_obj: str | os.PathLike[Any] | ReadBuffer | AbstractDataStore,
+        filename_or_obj: T_PathFileOrDataStore,
         *,
         mask_and_scale=True,
         decode_times=True,
@@ -535,7 +578,7 @@ class H5netcdfBackendEntrypoint(BackendE
 
     def open_groups_as_dict(
         self,
-        filename_or_obj: str | os.PathLike[Any] | ReadBuffer | AbstractDataStore,
+        filename_or_obj: T_PathFileOrDataStore,
         *,
         mask_and_scale=True,
         decode_times=True,
@@ -562,7 +605,7 @@ class H5netcdfBackendEntrypoint(BackendE
         # remove and set phony_dims="access" above
         emit_phony_dims_warning, phony_dims = _check_phony_dims(phony_dims)
 
-        filename_or_obj = _normalize_path(filename_or_obj)
+        filename_or_obj = _normalize_filename_or_obj(filename_or_obj)
         store = H5NetCDFStore.open(
             filename_or_obj,
             format=format,
@@ -607,7 +650,7 @@ class H5netcdfBackendEntrypoint(BackendE
         # only warn if phony_dims exist in file
         # remove together with the above check
         # after some versions
-        if store.ds._phony_dim_count > 0 and emit_phony_dims_warning:
+        if store.ds._root._phony_dim_count > 0 and emit_phony_dims_warning:
             _emit_phony_dims_warning()
 
         return groups_dict
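
The h5netcdf changes above route bytes and memoryview inputs through a small normalization step so they behave like file objects further down the call chain. A simplified sketch of that idea (hypothetical helper name; only the bytes/memoryview branch is shown):

import io


def normalize_filename_or_obj(filename_or_obj):
    # raw bytes / memoryview become an in-memory file object; paths, file-like
    # objects and data stores would be passed through unchanged
    if isinstance(filename_or_obj, (bytes, memoryview)):
        return io.BytesIO(filename_or_obj)
    return filename_or_obj


buf = normalize_filename_or_obj(b"\211HDF\r\n\032\nrest-of-file")
print(buf.read(8))  # the HDF5 magic number, b'\x89HDF\r\n\x1a\n'
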
diff -pruN 2025.03.1-8/xarray/backends/locks.py 2025.10.1-1/xarray/backends/locks.py
--- 2025.03.1-8/xarray/backends/locks.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/backends/locks.py	2025-10-10 10:38:05.000000000 +0000
@@ -4,15 +4,17 @@ import multiprocessing
 import threading
 import uuid
 import weakref
-from collections.abc import Hashable, MutableMapping
-from typing import Any, ClassVar
+from collections.abc import Callable, Hashable, MutableMapping, Sequence
+from typing import Any, ClassVar, Literal
 from weakref import WeakValueDictionary
 
+from xarray.core.types import Lock
+
 
 # SerializableLock is adapted from Dask:
 # https://github.com/dask/dask/blob/74e898f0ec712e8317ba86cc3b9d18b6b9922be0/dask/utils.py#L1160-L1224
 # Used under the terms of Dask's license, see licenses/DASK_LICENSE.
-class SerializableLock:
+class SerializableLock(Lock):
     """A Serializable per-process Lock
 
     This wraps a normal ``threading.Lock`` object and satisfies the same
@@ -90,7 +92,7 @@ NETCDFC_LOCK = SerializableLock()
 _FILE_LOCKS: MutableMapping[Any, threading.Lock] = weakref.WeakValueDictionary()
 
 
-def _get_threaded_lock(key):
+def _get_threaded_lock(key: str) -> threading.Lock:
     try:
         lock = _FILE_LOCKS[key]
     except KeyError:
@@ -98,14 +100,14 @@ def _get_threaded_lock(key):
     return lock
 
 
-def _get_multiprocessing_lock(key):
+def _get_multiprocessing_lock(key: str) -> Lock:
     # TODO: make use of the key -- maybe use locket.py?
     # https://github.com/mwilliamson/locket.py
     del key  # unused
     return multiprocessing.Lock()
 
 
-def _get_lock_maker(scheduler=None):
+def _get_lock_maker(scheduler: str | None = None) -> Callable[..., Lock]:
     """Returns an appropriate function for creating resource locks.
 
     Parameters
@@ -118,25 +120,21 @@ def _get_lock_maker(scheduler=None):
     dask.utils.get_scheduler_lock
     """
 
-    if scheduler is None:
-        return _get_threaded_lock
-    elif scheduler == "threaded":
+    if scheduler is None or scheduler == "threaded":
         return _get_threaded_lock
     elif scheduler == "multiprocessing":
         return _get_multiprocessing_lock
     elif scheduler == "distributed":
         # Lazy import distributed since it can add a significant
         # amount of time to import
-        try:
-            from dask.distributed import Lock as DistributedLock
-        except ImportError:
-            DistributedLock = None
+        from dask.distributed import Lock as DistributedLock
+
         return DistributedLock
     else:
         raise KeyError(scheduler)
 
 
-def _get_scheduler(get=None, collection=None) -> str | None:
+def get_dask_scheduler(get=None, collection=None) -> str | None:
     """Determine the dask scheduler that is being used.
 
     None is returned if no dask scheduler is active.
@@ -174,7 +172,7 @@ def _get_scheduler(get=None, collection=
     return "threaded"
 
 
-def get_write_lock(key):
+def get_write_lock(key: str) -> Lock:
     """Get a scheduler appropriate lock for writing to the given resource.
 
     Parameters
@@ -186,7 +184,7 @@ def get_write_lock(key):
     -------
     Lock object that can be used like a threading.Lock object.
     """
-    scheduler = _get_scheduler()
+    scheduler = get_dask_scheduler()
     lock_maker = _get_lock_maker(scheduler)
     return lock_maker(key)
 
@@ -209,14 +207,14 @@ def acquire(lock, blocking=True):
         return lock.acquire(blocking)
 
 
-class CombinedLock:
+class CombinedLock(Lock):
     """A combination of multiple locks.
 
     Like a locked door, a CombinedLock is locked if any of its constituent
     locks are locked.
     """
 
-    def __init__(self, locks):
+    def __init__(self, locks: Sequence[Lock]):
         self.locks = tuple(set(locks))  # remove duplicates
 
     def acquire(self, blocking=True):
@@ -241,7 +239,7 @@ class CombinedLock:
         return f"CombinedLock({list(self.locks)!r})"
 
 
-class DummyLock:
+class DummyLock(Lock):
     """DummyLock provides the lock API without any actual locking."""
 
     def acquire(self, blocking=True):
@@ -260,9 +258,9 @@ class DummyLock:
         return False
 
 
-def combine_locks(locks):
+def combine_locks(locks: Sequence[Lock]) -> Lock:
     """Combine a sequence of locks into a single lock."""
-    all_locks = []
+    all_locks: list[Lock] = []
     for lock in locks:
         if isinstance(lock, CombinedLock):
             all_locks.extend(lock.locks)
@@ -278,7 +276,7 @@ def combine_locks(locks):
         return DummyLock()
 
 
-def ensure_lock(lock):
+def ensure_lock(lock: Lock | None | Literal[False]) -> Lock:
     """Ensure that the given object is a lock."""
     if lock is None or lock is False:
         return DummyLock()
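
ensure_lock normalizes None and False into a no-op lock so call sites can always use a with-statement. A simplified sketch of that pattern (NoOpLock stands in for DummyLock; not the exact classes from the patch):

import threading


class NoOpLock:
    """Lock-shaped object that never blocks."""

    def acquire(self, blocking=True):
        return True

    def release(self):
        pass

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        return False

    def locked(self):
        return False


def ensure_lock(lock):
    # None or False mean "no locking required"
    return NoOpLock() if lock is None or lock is False else lock


for candidate in (None, False, threading.Lock()):
    with ensure_lock(candidate):
        pass  # critical section runs the same way regardless of the lock
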
diff -pruN 2025.03.1-8/xarray/backends/memory.py 2025.10.1-1/xarray/backends/memory.py
--- 2025.03.1-8/xarray/backends/memory.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/backends/memory.py	2025-10-10 10:38:05.000000000 +0000
@@ -5,6 +5,7 @@ import copy
 import numpy as np
 
 from xarray.backends.common import AbstractWritableDataStore
+from xarray.core import indexing
 from xarray.core.variable import Variable
 
 
@@ -24,7 +25,12 @@ class InMemoryDataStore(AbstractWritable
         return self._attributes
 
     def get_variables(self):
-        return self._variables
+        res = {}
+        for k, v in self._variables.items():
+            v = v.copy(deep=True)
+            res[k] = v
+            v._data = indexing.LazilyIndexedArray(v._data)
+        return res
 
     def get_dimensions(self):
         return {d: s for v in self._variables.values() for d, s in v.dims.items()}
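
InMemoryDataStore.get_variables now hands out deep copies whose data is wrapped in a lazy indexing adapter, so nothing is materialized until it is actually indexed. A toy illustration of the deferred-access idea (hypothetical LazyArrayView, not xarray's LazilyIndexedArray):

import numpy as np


class LazyArrayView:
    """Defer loading: the underlying array is only fetched on __getitem__."""

    def __init__(self, get_array):
        self._get_array = get_array

    def __getitem__(self, key):
        return self._get_array()[key]


loads = []


def fetch():
    loads.append("fetched")
    return np.arange(10)


view = LazyArrayView(fetch)
assert loads == []           # nothing fetched yet
assert view[3] == 3          # first access triggers the fetch
assert loads == ["fetched"]
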
diff -pruN 2025.03.1-8/xarray/backends/netCDF4_.py 2025.10.1-1/xarray/backends/netCDF4_.py
--- 2025.03.1-8/xarray/backends/netCDF4_.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/backends/netCDF4_.py	2025-10-10 10:38:05.000000000 +0000
@@ -5,22 +5,30 @@ import operator
 import os
 from collections.abc import Iterable
 from contextlib import suppress
-from typing import TYPE_CHECKING, Any
+from dataclasses import dataclass
+from io import IOBase
+from typing import TYPE_CHECKING, Any, Self
 
 import numpy as np
 
-from xarray import coding
 from xarray.backends.common import (
     BACKEND_ENTRYPOINTS,
     BackendArray,
     BackendEntrypoint,
+    BytesIOProxy,
+    T_PathFileOrDataStore,
     WritableCFDataStore,
     _normalize_path,
+    collect_ancestor_dimensions,
     datatree_from_dict_with_io_cleanup,
     find_root_and_group,
     robust_getitem,
 )
-from xarray.backends.file_manager import CachingFileManager, DummyFileManager
+from xarray.backends.file_manager import (
+    CachingFileManager,
+    DummyFileManager,
+    PickleableFileManager,
+)
 from xarray.backends.locks import (
     HDF5_LOCK,
     NETCDFC_LOCK,
@@ -30,6 +38,12 @@ from xarray.backends.locks import (
 )
 from xarray.backends.netcdf3 import encode_nc3_attr_value, encode_nc3_variable
 from xarray.backends.store import StoreBackendEntrypoint
+from xarray.coding.strings import (
+    CharacterArrayCoder,
+    EncodedStringCoder,
+    create_vlen_dtype,
+    is_unicode_dtype,
+)
 from xarray.coding.variables import pop_to
 from xarray.core import indexing
 from xarray.core.utils import (
@@ -41,13 +55,12 @@ from xarray.core.utils import (
 from xarray.core.variable import Variable
 
 if TYPE_CHECKING:
+    import netCDF4
     from h5netcdf.core import EnumType as h5EnumType
     from netCDF4 import EnumType as ncEnumType
 
-    from xarray.backends.common import AbstractDataStore
     from xarray.core.dataset import Dataset
     from xarray.core.datatree import DataTree
-    from xarray.core.types import ReadBuffer
 
 # This lookup table maps from dtype.byteorder to a readable endian
 # string used by netCDF4.
@@ -73,7 +86,7 @@ class BaseNetCDF4Array(BackendArray):
             # check vlen string dtype in further steps
             # it also prevents automatic string concatenation via
             # conventions.decode_cf_variable
-            dtype = coding.strings.create_vlen_dtype(str)
+            dtype = create_vlen_dtype(str)
         self.dtype = dtype
 
     def __setitem__(self, key, value):
@@ -127,12 +140,12 @@ class NetCDF4ArrayWrapper(BaseNetCDF4Arr
         return array
 
 
-def _encode_nc4_variable(var):
+def _encode_nc4_variable(var, name=None):
     for coder in [
-        coding.strings.EncodedStringCoder(allows_unicode=True),
-        coding.strings.CharacterArrayCoder(),
+        EncodedStringCoder(allows_unicode=True),
+        CharacterArrayCoder(),
     ]:
-        var = coder.encode(var)
+        var = coder.encode(var, name=name)
     return var
 
 
@@ -164,7 +177,7 @@ def _nc4_dtype(var):
     if "dtype" in var.encoding:
         dtype = var.encoding.pop("dtype")
         _check_encoding_dtype_is_vlen_string(dtype)
-    elif coding.strings.is_unicode_dtype(var.dtype):
+    elif is_unicode_dtype(var.dtype):
         dtype = str
     elif var.dtype.kind in ["i", "u", "f", "c", "S"]:
         dtype = var.dtype
@@ -353,6 +366,28 @@ def _build_and_get_enum(
     return datatype
 
 
+@dataclass
+class _Thunk:
+    """Pickleable equivalent of `lambda: value`."""
+
+    value: Any
+
+    def __call__(self):
+        return self.value
+
+
+@dataclass
+class _CloseWithCopy:
+    """Wrapper around netCDF4's esoteric interface for writing in-memory data."""
+
+    proxy: BytesIOProxy
+    nc4_dataset: netCDF4.Dataset
+
+    def __call__(self):
+        value = self.nc4_dataset.close()
+        self.proxy.getvalue = _Thunk(value)
+
+
 class NetCDF4DataStore(WritableCFDataStore):
     """Store for reading and writing data via the Python-NetCDF4 library.
 
@@ -396,6 +431,17 @@ class NetCDF4DataStore(WritableCFDataSto
         self.lock = ensure_lock(lock)
         self.autoclose = autoclose
 
+    def get_child_store(self, group: str) -> Self:
+        if self._group is not None:
+            group = os.path.join(self._group, group)
+        return type(self)(
+            self._manager,
+            group=group,
+            mode=self._mode,
+            lock=self.lock,
+            autoclose=self.autoclose,
+        )
+
     @classmethod
     def open(
         cls,
@@ -416,27 +462,31 @@ class NetCDF4DataStore(WritableCFDataSto
         if isinstance(filename, os.PathLike):
             filename = os.fspath(filename)
 
-        if not isinstance(filename, str):
-            raise ValueError(
-                "can only read bytes or file-like objects "
-                "with engine='scipy' or 'h5netcdf'"
+        if isinstance(filename, IOBase):
+            raise TypeError(
+                f"file objects are not supported by the netCDF4 backend: {filename}"
             )
 
+        if not isinstance(filename, str | bytes | memoryview | BytesIOProxy):
+            raise TypeError(f"invalid filename for netCDF4 backend: {filename}")
+
         if format is None:
             format = "NETCDF4"
 
         if lock is None:
             if mode == "r":
-                if is_remote_uri(filename):
+                if isinstance(filename, str) and is_remote_uri(filename):
                     lock = NETCDFC_LOCK
                 else:
                     lock = NETCDF4_PYTHON_LOCK
             else:
                 if format is None or format.startswith("NETCDF4"):
-                    base_lock = NETCDF4_PYTHON_LOCK
+                    lock = NETCDF4_PYTHON_LOCK
                 else:
-                    base_lock = NETCDFC_LOCK
-                lock = combine_locks([base_lock, get_write_lock(filename)])
+                    lock = NETCDFC_LOCK
+
+                if isinstance(filename, str):
+                    lock = combine_locks([lock, get_write_lock(filename)])
 
         kwargs = dict(
             clobber=clobber,
@@ -446,9 +496,31 @@ class NetCDF4DataStore(WritableCFDataSto
         )
         if auto_complex is not None:
             kwargs["auto_complex"] = auto_complex
-        manager = CachingFileManager(
-            netCDF4.Dataset, filename, mode=mode, kwargs=kwargs
-        )
+
+        if isinstance(filename, BytesIOProxy):
+            assert mode == "w"
+            # Size hint used for creating netCDF3 files. Per the documentation
+            # for nc__create(), the special value NC_SIZEHINT_DEFAULT (which is
+            # the value 0), lets the netcdf library choose a suitable initial
+            # size.
+            memory = 0
+            kwargs["diskless"] = False
+            nc4_dataset = netCDF4.Dataset(
+                "<xarray-in-memory-write>", mode=mode, memory=memory, **kwargs
+            )
+            close = _CloseWithCopy(filename, nc4_dataset)
+            manager = DummyFileManager(nc4_dataset, close=close)
+
+        elif isinstance(filename, bytes | memoryview):
+            assert mode == "r"
+            kwargs["memory"] = filename
+            manager = PickleableFileManager(
+                netCDF4.Dataset, "<xarray-in-memory-read>", mode=mode, kwargs=kwargs
+            )
+        else:
+            manager = CachingFileManager(
+                netCDF4.Dataset, filename, mode=mode, kwargs=kwargs
+            )
         return cls(manager, group=group, mode=mode, lock=lock, autoclose=autoclose)
 
     def _acquire(self, needs_lock=True):
@@ -514,6 +586,9 @@ class NetCDF4DataStore(WritableCFDataSto
     def get_dimensions(self):
         return FrozenDict((k, len(v)) for k, v in self.ds.dimensions.items())
 
+    def get_parent_dimensions(self):
+        return FrozenDict(collect_ancestor_dimensions(self.ds))
+
     def get_encoding(self):
         return {
             "unlimited_dims": {
@@ -535,12 +610,12 @@ class NetCDF4DataStore(WritableCFDataSto
         else:
             self.ds.setncattr(key, value)
 
-    def encode_variable(self, variable):
+    def encode_variable(self, variable, name=None):
         variable = _force_native_endianness(variable)
         if self.format == "NETCDF4":
-            variable = _encode_nc4_variable(variable)
+            variable = _encode_nc4_variable(variable, name=name)
         else:
-            variable = encode_nc3_variable(variable)
+            variable = encode_nc3_variable(variable, name=name)
         return variable
 
     def prepare_variable(
@@ -623,14 +698,17 @@ class NetCDF4BackendEntrypoint(BackendEn
         "Open netCDF (.nc, .nc4 and .cdf) and most HDF5 files using netCDF4 in Xarray"
     )
     url = "https://docs.xarray.dev/en/stable/generated/xarray.backends.NetCDF4BackendEntrypoint.html"
+    supports_groups = True
 
-    def guess_can_open(
-        self,
-        filename_or_obj: str | os.PathLike[Any] | ReadBuffer | AbstractDataStore,
-    ) -> bool:
+    def guess_can_open(self, filename_or_obj: T_PathFileOrDataStore) -> bool:
         if isinstance(filename_or_obj, str) and is_remote_uri(filename_or_obj):
             return True
-        magic_number = try_read_magic_number_from_path(filename_or_obj)
+
+        magic_number = (
+            bytes(filename_or_obj[:8])
+            if isinstance(filename_or_obj, bytes | memoryview)
+            else try_read_magic_number_from_path(filename_or_obj)
+        )
         if magic_number is not None:
             # netcdf 3 or HDF5
             return magic_number.startswith((b"CDF", b"\211HDF\r\n\032\n"))
@@ -643,7 +721,7 @@ class NetCDF4BackendEntrypoint(BackendEn
 
     def open_dataset(
         self,
-        filename_or_obj: str | os.PathLike[Any] | ReadBuffer | AbstractDataStore,
+        filename_or_obj: T_PathFileOrDataStore,
         *,
         mask_and_scale=True,
         decode_times=True,
@@ -692,7 +770,7 @@ class NetCDF4BackendEntrypoint(BackendEn
 
     def open_datatree(
         self,
-        filename_or_obj: str | os.PathLike[Any] | ReadBuffer | AbstractDataStore,
+        filename_or_obj: T_PathFileOrDataStore,
         *,
         mask_and_scale=True,
         decode_times=True,
@@ -725,6 +803,7 @@ class NetCDF4BackendEntrypoint(BackendEn
             clobber=clobber,
             diskless=diskless,
             persist=persist,
+            auto_complex=auto_complex,
             lock=lock,
             autoclose=autoclose,
             **kwargs,
@@ -734,7 +813,7 @@ class NetCDF4BackendEntrypoint(BackendEn
 
     def open_groups_as_dict(
         self,
-        filename_or_obj: str | os.PathLike[Any] | ReadBuffer | AbstractDataStore,
+        filename_or_obj: T_PathFileOrDataStore,
         *,
         mask_and_scale=True,
         decode_times=True,
@@ -764,6 +843,7 @@ class NetCDF4BackendEntrypoint(BackendEn
             clobber=clobber,
             diskless=diskless,
             persist=persist,
+            auto_complex=auto_complex,
             lock=lock,
             autoclose=autoclose,
         )
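
The _Thunk dataclass above exists because a lambda cannot be pickled, while a module-level dataclass holding the value can. A short sketch of that trade-off (Thunk is a hypothetical stand-in for the private helper):

import pickle
from dataclasses import dataclass


@dataclass
class Thunk:
    """Pickleable equivalent of `lambda: value`."""

    value: object

    def __call__(self):
        return self.value


thunk = Thunk(b"netcdf bytes")
assert pickle.loads(pickle.dumps(thunk))() == b"netcdf bytes"

try:
    pickle.dumps(lambda: b"netcdf bytes")  # lambdas are pickled by name and fail
except (pickle.PicklingError, AttributeError):
    print("lambda is not pickleable")
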
diff -pruN 2025.03.1-8/xarray/backends/netcdf3.py 2025.10.1-1/xarray/backends/netcdf3.py
--- 2025.03.1-8/xarray/backends/netcdf3.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/backends/netcdf3.py	2025-10-10 10:38:05.000000000 +0000
@@ -111,20 +111,19 @@ def _maybe_prepare_times(var):
     data = var.data
     if data.dtype.kind in "iu":
         units = var.attrs.get("units", None)
-        if units is not None:
-            if coding.variables._is_time_like(units):
-                mask = data == np.iinfo(np.int64).min
-                if mask.any():
-                    data = np.where(mask, var.attrs.get("_FillValue", np.nan), data)
+        if units is not None and coding.variables._is_time_like(units):
+            mask = data == np.iinfo(np.int64).min
+            if mask.any():
+                data = np.where(mask, var.attrs.get("_FillValue", np.nan), data)
     return data
 
 
-def encode_nc3_variable(var):
+def encode_nc3_variable(var, name=None):
     for coder in [
         coding.strings.EncodedStringCoder(allows_unicode=False),
         coding.strings.CharacterArrayCoder(),
     ]:
-        var = coder.encode(var)
+        var = coder.encode(var, name=name)
     data = _maybe_prepare_times(var)
     data = coerce_nc3_dtype(data)
     attrs = encode_nc3_attrs(var.attrs)
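
_maybe_prepare_times replaces the int64-minimum sentinel used for missing times with the variable's fill value before the data reaches the netCDF3 writer. A small sketch of that masking step (standalone function; the fill value of -9999 is only an example):

import numpy as np


def replace_time_sentinel(data, fill_value):
    # np.iinfo(np.int64).min marks missing integer times; swap it for the
    # declared fill value so netCDF3 does not store the raw sentinel
    mask = data == np.iinfo(np.int64).min
    return np.where(mask, fill_value, data) if mask.any() else data


times = np.array([0, np.iinfo(np.int64).min, 7200])
print(replace_time_sentinel(times, fill_value=-9999))  # [0, -9999, 7200]
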
diff -pruN 2025.03.1-8/xarray/backends/plugins.py 2025.10.1-1/xarray/backends/plugins.py
--- 2025.03.1-8/xarray/backends/plugins.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/backends/plugins.py	2025-10-10 10:38:05.000000000 +0000
@@ -9,6 +9,7 @@ from importlib.metadata import entry_poi
 from typing import TYPE_CHECKING, Any
 
 from xarray.backends.common import BACKEND_ENTRYPOINTS, BackendEntrypoint
+from xarray.core.options import OPTIONS
 from xarray.core.utils import module_available
 
 if TYPE_CHECKING:
@@ -18,8 +19,6 @@ if TYPE_CHECKING:
     from xarray.backends.common import AbstractDataStore
     from xarray.core.types import ReadBuffer
 
-STANDARD_BACKENDS_ORDER = ["netcdf4", "h5netcdf", "scipy"]
-
 
 def remove_duplicates(entrypoints: EntryPoints) -> list[EntryPoint]:
     # sort and group entrypoints by name
@@ -91,8 +90,8 @@ def set_missing_parameters(
 def sort_backends(
     backend_entrypoints: dict[str, type[BackendEntrypoint]],
 ) -> dict[str, type[BackendEntrypoint]]:
-    ordered_backends_entrypoints = {}
-    for be_name in STANDARD_BACKENDS_ORDER:
+    ordered_backends_entrypoints: dict[str, type[BackendEntrypoint]] = {}
+    for be_name in OPTIONS["netcdf_engine_order"]:
         if backend_entrypoints.get(be_name) is not None:
             ordered_backends_entrypoints[be_name] = backend_entrypoints.pop(be_name)
     ordered_backends_entrypoints.update(
@@ -138,11 +137,19 @@ def refresh_engines() -> None:
 
 
 def guess_engine(
-    store_spec: str | os.PathLike[Any] | ReadBuffer | AbstractDataStore,
+    store_spec: str
+    | os.PathLike[Any]
+    | ReadBuffer
+    | bytes
+    | memoryview
+    | AbstractDataStore,
+    must_support_groups: bool = False,
 ) -> str | type[BackendEntrypoint]:
     engines = list_engines()
 
     for engine, backend in engines.items():
+        if must_support_groups and not backend.supports_groups:
+            continue
         try:
             if backend.guess_can_open(store_spec):
                 return engine
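
guess_engine can now be asked for an engine that supports reading groups, skipping backends whose supports_groups flag is False. A simplified sketch of that selection loop over a hypothetical registry (pick_engine and FakeBackend are illustrative names only):

def pick_engine(engines, store_spec, must_support_groups=False):
    for name, backend in engines.items():
        if must_support_groups and not getattr(backend, "supports_groups", False):
            continue  # this backend cannot read hierarchical groups
        try:
            if backend.guess_can_open(store_spec):
                return name
        except Exception:
            continue  # a broken backend should not abort engine guessing
    raise ValueError("found no backend able to open the input")


class FakeBackend:
    supports_groups = True

    def guess_can_open(self, spec):
        return str(spec).endswith(".nc")


print(pick_engine({"fake": FakeBackend()}, "data.nc", must_support_groups=True))
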
@@ -157,6 +164,8 @@ def guess_engine(
     for engine, (_, backend_cls) in BACKEND_ENTRYPOINTS.items():
         try:
             backend = backend_cls()
+            if must_support_groups and not backend.supports_groups:
+                continue
             if backend.guess_can_open(store_spec):
                 compatible_engines.append(engine)
         except Exception:
@@ -175,6 +184,15 @@ def guess_engine(
                 "https://docs.xarray.dev/en/stable/getting-started-guide/installing.html\n"
                 "https://docs.xarray.dev/en/stable/user-guide/io.html"
             )
+        elif must_support_groups:
+            error_msg = (
+                "xarray is unable to open this file because it has no currently "
+                "installed IO backends that support reading groups (e.g., h5netcdf "
+                "or netCDF4-python). Xarray's read/write support requires "
+                "installing optional IO dependencies, see:\n"
+                "https://docs.xarray.dev/en/stable/getting-started-guide/installing.html\n"
+                "https://docs.xarray.dev/en/stable/user-guide/io"
+            )
         else:
             error_msg = (
                 "xarray is unable to open this file because it has no currently "
diff -pruN 2025.03.1-8/xarray/backends/pydap_.py 2025.10.1-1/xarray/backends/pydap_.py
--- 2025.03.1-8/xarray/backends/pydap_.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/backends/pydap_.py	2025-10-10 10:38:05.000000000 +0000
@@ -10,6 +10,9 @@ from xarray.backends.common import (
     AbstractDataStore,
     BackendArray,
     BackendEntrypoint,
+    T_PathFileOrDataStore,
+    _normalize_path,
+    datatree_from_dict_with_io_cleanup,
     robust_getitem,
 )
 from xarray.backends.store import StoreBackendEntrypoint
@@ -18,7 +21,6 @@ from xarray.core.utils import (
     Frozen,
     FrozenDict,
     close_on_error,
-    is_dict_like,
     is_remote_uri,
 )
 from xarray.core.variable import Variable
@@ -28,6 +30,7 @@ if TYPE_CHECKING:
     import os
 
     from xarray.core.dataset import Dataset
+    from xarray.core.datatree import DataTree
     from xarray.core.types import ReadBuffer
 
 
@@ -49,36 +52,26 @@ class PydapArrayWrapper(BackendArray):
         )
 
     def _getitem(self, key):
-        # pull the data from the array attribute if possible, to avoid
-        # downloading coordinate data twice
-        array = getattr(self.array, "array", self.array)
-        result = robust_getitem(array, key, catch=ValueError)
-        result = np.asarray(result)
+        result = robust_getitem(self.array, key, catch=ValueError)
         # in some cases, pydap doesn't squeeze axes automatically like numpy
+        result = np.asarray(result)
         axis = tuple(n for n, k in enumerate(key) if isinstance(k, integer_types))
-        if result.ndim + len(axis) != array.ndim and axis:
+        if result.ndim + len(axis) != self.array.ndim and axis:
             result = np.squeeze(result, axis)
 
         return result
 
 
-def _fix_attributes(attributes):
-    attributes = dict(attributes)
-    for k in list(attributes):
-        if k.lower() == "global" or k.lower().endswith("_global"):
-            # move global attributes to the top level, like the netcdf-C
-            # DAP client
-            attributes.update(attributes.pop(k))
-        elif is_dict_like(attributes[k]):
-            # Make Hierarchical attributes to a single level with a
-            # dot-separated key
-            attributes.update(
-                {
-                    f"{k}.{k_child}": v_child
-                    for k_child, v_child in attributes.pop(k).items()
-                }
-            )
-    return attributes
+def get_group(ds, group):
+    if group in {None, "", "/"}:
+        # use the root group
+        return ds
+    else:
+        try:
+            return ds[group]
+        except KeyError as e:
+            # wrap error to provide slightly more helpful message
+            raise KeyError(f"group not found: {group}", e) from e
 
 
 class PydapDataStore(AbstractDataStore):
@@ -88,18 +81,22 @@ class PydapDataStore(AbstractDataStore):
     be useful if the netCDF4 library is not available.
     """
 
-    def __init__(self, ds):
+    def __init__(self, dataset, group=None):
         """
         Parameters
         ----------
         ds : pydap DatasetType
+        group: str or None (default None)
+            The group to open. If None, the root group is opened.
         """
-        self.ds = ds
+        self.dataset = dataset
+        self.group = group
 
     @classmethod
     def open(
         cls,
         url,
+        group=None,
         application=None,
         session=None,
         output_grid=None,
@@ -107,43 +104,90 @@ class PydapDataStore(AbstractDataStore):
         verify=None,
         user_charset=None,
     ):
-        import pydap.client
-        import pydap.lib
-
-        if timeout is None:
-            from pydap.lib import DEFAULT_TIMEOUT
-
-            timeout = DEFAULT_TIMEOUT
+        from pydap.client import open_url
+        from pydap.net import DEFAULT_TIMEOUT
 
+        if output_grid is not None:
+            # output_grid is no longer passed to pydap.client.open_url
+            from xarray.core.utils import emit_user_level_warning
+
+            emit_user_level_warning(
+                "`output_grid` is deprecated and will be removed in a future version"
+                " of xarray. It will be set to `None`, the new default.",
+                DeprecationWarning,
+            )
+            output_grid = False  # new default behavior
         kwargs = {
             "url": url,
             "application": application,
             "session": session,
-            "output_grid": output_grid or True,
-            "timeout": timeout,
+            "output_grid": output_grid or False,
+            "timeout": timeout or DEFAULT_TIMEOUT,
+            "verify": verify or True,
+            "user_charset": user_charset,
         }
-        if verify is not None:
-            kwargs.update({"verify": verify})
-        if user_charset is not None:
-            kwargs.update({"user_charset": user_charset})
-        ds = pydap.client.open_url(**kwargs)
-        return cls(ds)
+        if isinstance(url, str):
+            # check it begins with an acceptable scheme
+            dataset = open_url(**kwargs)
+        elif hasattr(url, "ds"):
+            # pydap dataset
+            dataset = url.ds
+        args = {"dataset": dataset}
+        if group:
+            # only then, change the default
+            args["group"] = group
+        return cls(**args)
 
     def open_store_variable(self, var):
         data = indexing.LazilyIndexedArray(PydapArrayWrapper(var))
-        return Variable(var.dimensions, data, _fix_attributes(var.attributes))
+        try:
+            dimensions = [
+                dim.split("/")[-1] if dim.startswith("/") else dim for dim in var.dims
+            ]
+        except AttributeError:
+            # GridType does not have a dims attribute - instead get `dimensions`
+            # see https://github.com/pydap/pydap/issues/485
+            dimensions = var.dimensions
+        return Variable(dimensions, data, var.attributes)
 
     def get_variables(self):
-        return FrozenDict(
-            (k, self.open_store_variable(self.ds[k])) for k in self.ds.keys()
-        )
+        # first get all variable arrays, excluding any container types like
+        # `Groups`, `Sequence` or `Structure`
+        try:
+            _vars = list(self.ds.variables())
+            _vars += list(self.ds.grids())  # dap2 objects
+        except AttributeError:
+            from pydap.model import GroupType
+
+            _vars = [
+                var
+                for var in self.ds.keys()
+                # skip keys that are GroupType containers
+                if not isinstance(self.ds[var], GroupType)
+            ]
+        return FrozenDict((k, self.open_store_variable(self.ds[k])) for k in _vars)
 
     def get_attrs(self):
-        return Frozen(_fix_attributes(self.ds.attributes))
+        """Remove any opendap specific attributes"""
+        opendap_attrs = (
+            "configuration",
+            "build_dmrpp",
+            "bes",
+            "libdap",
+            "invocation",
+            "dimensions",
+        )
+        attrs = self.ds.attributes
+        list(map(attrs.pop, opendap_attrs, [None] * 6))
+        return Frozen(attrs)
 
     def get_dimensions(self):
         return Frozen(self.ds.dimensions)
 
+    @property
+    def ds(self):
+        return get_group(self.dataset, self.group)
+
 
 class PydapBackendEntrypoint(BackendEntrypoint):
     """
@@ -154,7 +198,7 @@ class PydapBackendEntrypoint(BackendEntr
     This backend is selected by default for urls.
 
     For more information about the underlying library, visit:
-    https://www.pydap.org
+    https://pydap.github.io/pydap/en/intro.html
 
     See Also
     --------
@@ -164,15 +208,14 @@ class PydapBackendEntrypoint(BackendEntr
     description = "Open remote datasets via OPeNDAP using pydap in Xarray"
     url = "https://docs.xarray.dev/en/stable/generated/xarray.backends.PydapBackendEntrypoint.html"
 
-    def guess_can_open(
-        self,
-        filename_or_obj: str | os.PathLike[Any] | ReadBuffer | AbstractDataStore,
-    ) -> bool:
+    def guess_can_open(self, filename_or_obj: T_PathFileOrDataStore) -> bool:
         return isinstance(filename_or_obj, str) and is_remote_uri(filename_or_obj)
 
     def open_dataset(
         self,
-        filename_or_obj: str | os.PathLike[Any] | ReadBuffer | AbstractDataStore,
+        filename_or_obj: (
+            str | os.PathLike[Any] | ReadBuffer | bytes | memoryview | AbstractDataStore
+        ),
         *,
         mask_and_scale=True,
         decode_times=True,
@@ -181,6 +224,7 @@ class PydapBackendEntrypoint(BackendEntr
         drop_variables: str | Iterable[str] | None = None,
         use_cftime=None,
         decode_timedelta=None,
+        group=None,
         application=None,
         session=None,
         output_grid=None,
@@ -190,6 +234,7 @@ class PydapBackendEntrypoint(BackendEntr
     ) -> Dataset:
         store = PydapDataStore.open(
             url=filename_or_obj,
+            group=group,
             application=application,
             session=session,
             output_grid=output_grid,
@@ -197,7 +242,6 @@ class PydapBackendEntrypoint(BackendEntr
             verify=verify,
             user_charset=user_charset,
         )
-
         store_entrypoint = StoreBackendEntrypoint()
         with close_on_error(store):
             ds = store_entrypoint.open_dataset(
@@ -212,5 +256,140 @@ class PydapBackendEntrypoint(BackendEntr
             )
             return ds
 
+    def open_datatree(
+        self,
+        filename_or_obj: T_PathFileOrDataStore,
+        *,
+        mask_and_scale=True,
+        decode_times=True,
+        concat_characters=True,
+        decode_coords=True,
+        drop_variables: str | Iterable[str] | None = None,
+        use_cftime=None,
+        decode_timedelta=None,
+        group: str | None = None,
+        application=None,
+        session=None,
+        timeout=None,
+        verify=None,
+        user_charset=None,
+    ) -> DataTree:
+        groups_dict = self.open_groups_as_dict(
+            filename_or_obj,
+            mask_and_scale=mask_and_scale,
+            decode_times=decode_times,
+            concat_characters=concat_characters,
+            decode_coords=decode_coords,
+            drop_variables=drop_variables,
+            use_cftime=use_cftime,
+            decode_timedelta=decode_timedelta,
+            group=group,
+            application=None,
+            session=None,
+            timeout=None,
+            verify=None,
+            user_charset=None,
+        )
+
+        return datatree_from_dict_with_io_cleanup(groups_dict)
+
+    def open_groups_as_dict(
+        self,
+        filename_or_obj: T_PathFileOrDataStore,
+        *,
+        mask_and_scale=True,
+        decode_times=True,
+        concat_characters=True,
+        decode_coords=True,
+        drop_variables: str | Iterable[str] | None = None,
+        use_cftime=None,
+        decode_timedelta=None,
+        group: str | None = None,
+        application=None,
+        session=None,
+        timeout=None,
+        verify=None,
+        user_charset=None,
+    ) -> dict[str, Dataset]:
+        from xarray.core.treenode import NodePath
+
+        filename_or_obj = _normalize_path(filename_or_obj)
+        store = PydapDataStore.open(
+            url=filename_or_obj,
+            application=application,
+            session=session,
+            timeout=timeout,
+            verify=verify,
+            user_charset=user_charset,
+        )
+
+        # Check for a group and make it a parent if it exists
+        if group:
+            parent = str(NodePath("/") / NodePath(group))
+        else:
+            parent = str(NodePath("/"))
+
+        groups_dict = {}
+        group_names = [parent]
+        # construct fully qualified path to group
+        try:
+            # this works for pydap >= 3.5.1
+            Groups = store.ds[parent].groups()
+        except AttributeError:
+            # THIS IS ONLY NEEDED FOR `pydap == 3.5.0`
+            # `pydap>= 3.5.1` has a new method `groups()`
+            # that returns a dict of group names and their paths
+            def group_fqn(store, path=None, g_fqn=None) -> dict[str, str]:
+                """To be removed for pydap > 3.5.0.
+                Derives the fully qualified name of a Group."""
+                from pydap.model import GroupType
+
+                if not path:
+                    path = "/"  # parent
+                if not g_fqn:
+                    g_fqn = {}
+                groups = [
+                    store[key].id
+                    for key in store.keys()
+                    if isinstance(store[key], GroupType)
+                ]
+                for g in groups:
+                    g_fqn.update({g: path})
+                    subgroups = [
+                        var for var in store[g] if isinstance(store[g][var], GroupType)
+                    ]
+                    if len(subgroups) > 0:
+                        npath = path + g
+                        g_fqn = group_fqn(store[g], npath, g_fqn)
+                return g_fqn
+
+            Groups = group_fqn(store.ds)
+        group_names += [
+            str(NodePath(path_to_group) / NodePath(group))
+            for group, path_to_group in Groups.items()
+        ]
+        for path_group in group_names:
+            # get a group from the store
+            store.group = path_group
+            store_entrypoint = StoreBackendEntrypoint()
+            with close_on_error(store):
+                group_ds = store_entrypoint.open_dataset(
+                    store,
+                    mask_and_scale=mask_and_scale,
+                    decode_times=decode_times,
+                    concat_characters=concat_characters,
+                    decode_coords=decode_coords,
+                    drop_variables=drop_variables,
+                    use_cftime=use_cftime,
+                    decode_timedelta=decode_timedelta,
+                )
+            if group:
+                group_name = str(NodePath(path_group).relative_to(parent))
+            else:
+                group_name = str(NodePath(path_group))
+            groups_dict[group_name] = group_ds
+
+        return groups_dict
+
 
 BACKEND_ENTRYPOINTS["pydap"] = ("pydap", PydapBackendEntrypoint)
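
open_groups_as_dict builds fully qualified group names by joining each group onto the path of its parent. A sketch of that path construction, using pathlib.PurePosixPath as a stand-in for xarray's NodePath (the function name and sample mapping are hypothetical):

from pathlib import PurePosixPath


def fully_qualified_groups(groups, parent="/"):
    # `groups` maps a group name to the path of its parent group, which is the
    # shape of the mapping returned by pydap's .groups()
    names = [parent]
    names += [str(PurePosixPath(path) / name) for name, path in groups.items()]
    return names


print(fully_qualified_groups({"ocean": "/", "salinity": "/ocean"}))
# ['/', '/ocean', '/ocean/salinity']
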
diff -pruN 2025.03.1-8/xarray/backends/scipy_.py 2025.10.1-1/xarray/backends/scipy_.py
--- 2025.03.1-8/xarray/backends/scipy_.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/backends/scipy_.py	2025-10-10 10:38:05.000000000 +0000
@@ -12,6 +12,8 @@ from xarray.backends.common import (
     BACKEND_ENTRYPOINTS,
     BackendArray,
     BackendEntrypoint,
+    BytesIOProxy,
+    T_PathFileOrDataStore,
     WritableCFDataStore,
     _normalize_path,
 )
@@ -33,7 +35,15 @@ from xarray.core.utils import (
 )
 from xarray.core.variable import Variable
 
+try:
+    from scipy.io import netcdf_file as netcdf_file_base
+except ImportError:
+    netcdf_file_base = object  # type: ignore[assignment,misc,unused-ignore]  # scipy is optional
+
+
 if TYPE_CHECKING:
+    import scipy.io
+
     from xarray.backends.common import AbstractDataStore
     from xarray.core.dataset import Dataset
     from xarray.core.types import ReadBuffer
@@ -100,13 +110,38 @@ class ScipyArrayWrapper(BackendArray):
                     raise
 
 
-def _open_scipy_netcdf(filename, mode, mmap, version):
+# TODO: Make the scipy import lazy again after upstreaming these fixes.
+class flush_only_netcdf_file(netcdf_file_base):
+    # scipy.io.netcdf_file.close() incorrectly closes file objects that
+    # were passed in as constructor arguments:
+    # https://github.com/scipy/scipy/issues/13905
+
+    # Instead of closing such files, only call flush(), which is
+    # equivalent as long as the netcdf_file object is not mmapped.
+    # This suffices to keep BytesIO objects open long enough to read
+    # their contents from to_netcdf(), but underlying files still get
+    # closed when the netcdf_file is garbage collected (via __del__),
+    # and will need to be fixed upstream in scipy.
+    def close(self):
+        if hasattr(self, "fp") and not self.fp.closed:
+            self.flush()
+            self.fp.seek(0)  # allow file to be read again
+
+    def __del__(self):
+        # Remove the __del__ method, which in scipy is aliased to close().
+        # These files need to be closed explicitly by xarray.
+        pass
+
+
+def _open_scipy_netcdf(filename, mode, mmap, version, flush_only=False):
     import scipy.io
 
+    netcdf_file = flush_only_netcdf_file if flush_only else scipy.io.netcdf_file
+
     # if the string ends with .gz, then gunzip and open as netcdf file
     if isinstance(filename, str) and filename.endswith(".gz"):
         try:
-            return scipy.io.netcdf_file(
+            return netcdf_file(
                 gzip.open(filename), mode=mode, mmap=mmap, version=version
             )
         except TypeError as e:
@@ -119,12 +154,8 @@ def _open_scipy_netcdf(filename, mode, m
             else:
                 raise
 
-    if isinstance(filename, bytes) and filename.startswith(b"CDF"):
-        # it's a NetCDF3 bytestring
-        filename = io.BytesIO(filename)
-
     try:
-        return scipy.io.netcdf_file(filename, mode=mode, mmap=mmap, version=version)
+        return netcdf_file(filename, mode=mode, mmap=mmap, version=version)
     except TypeError as e:  # netcdf3 message is obscure in this case
         errmsg = e.args[0]
         if "is not a valid NetCDF 3 file" in errmsg:
@@ -141,7 +172,7 @@ def _open_scipy_netcdf(filename, mode, m
 
 
 class ScipyDataStore(WritableCFDataStore):
-    """Store for reading and writing data via scipy.io.netcdf.
+    """Store for reading and writing data via scipy.io.netcdf_file.
 
     This store has the advantage of being able to be initialized with a
     StringIO object, allowing for serialization without writing to disk.
@@ -167,7 +198,12 @@ class ScipyDataStore(WritableCFDataStore
 
         self.lock = ensure_lock(lock)
 
-        if isinstance(filename_or_obj, str):
+        if isinstance(filename_or_obj, BytesIOProxy):
+            source = filename_or_obj
+            filename_or_obj = io.BytesIO()
+            source.getvalue = filename_or_obj.getbuffer
+
+        if isinstance(filename_or_obj, str):  # path
             manager = CachingFileManager(
                 _open_scipy_netcdf,
                 filename_or_obj,
@@ -175,22 +211,33 @@ class ScipyDataStore(WritableCFDataStore
                 lock=lock,
                 kwargs=dict(mmap=mmap, version=version),
             )
-        else:
+        elif hasattr(filename_or_obj, "seek"):  # file object
+            # Note: checking for .seek matches the check for file objects
+            # in scipy.io.netcdf_file
             scipy_dataset = _open_scipy_netcdf(
-                filename_or_obj, mode=mode, mmap=mmap, version=version
+                filename_or_obj,
+                mode=mode,
+                mmap=mmap,
+                version=version,
+                flush_only=True,
             )
+            assert not scipy_dataset.use_mmap  # no mmap for file objects
             manager = DummyFileManager(scipy_dataset)
+        else:
+            raise ValueError(
+                f"cannot open {filename_or_obj=} with scipy.io.netcdf_file"
+            )
 
         self._manager = manager
 
     @property
-    def ds(self):
+    def ds(self) -> scipy.io.netcdf_file:
         return self._manager.acquire()
 
     def open_store_variable(self, name, var):
         return Variable(
             var.dimensions,
-            ScipyArrayWrapper(name, self),
+            indexing.LazilyIndexedArray(ScipyArrayWrapper(name, self)),
             _decode_attrs(var._attributes),
         )
 
@@ -227,8 +274,8 @@ class ScipyDataStore(WritableCFDataStore
         value = encode_nc3_attr_value(value)
         setattr(self.ds, key, value)
 
-    def encode_variable(self, variable):
-        variable = encode_nc3_variable(variable)
+    def encode_variable(self, variable, name=None):
+        variable = encode_nc3_variable(variable, name=name)
         return variable
 
     def prepare_variable(
@@ -245,8 +292,8 @@ class ScipyDataStore(WritableCFDataStore
 
         data = variable.data
         # nb. this still creates a numpy array in all memory, even though we
-        # don't write the data yet; scipy.io.netcdf does not not support
-        # incremental writes.
+        # don't write the data yet; scipy.io.netcdf does not support incremental
+        # writes.
         if name not in self.ds.variables:
             self.ds.createVariable(name, data.dtype, variable.dims)
         scipy_var = self.ds.variables[name]
@@ -265,6 +312,20 @@ class ScipyDataStore(WritableCFDataStore
         self._manager.close()
 
 
+def _normalize_filename_or_obj(
+    filename_or_obj: str
+    | os.PathLike[Any]
+    | ReadBuffer
+    | bytes
+    | memoryview
+    | AbstractDataStore,
+) -> str | ReadBuffer | AbstractDataStore:
+    if isinstance(filename_or_obj, bytes | memoryview):
+        return io.BytesIO(filename_or_obj)
+    else:
+        return _normalize_path(filename_or_obj)
+
+
 class ScipyBackendEntrypoint(BackendEntrypoint):
     """
     Backend for netCDF files based on the scipy package.
@@ -291,8 +352,9 @@ class ScipyBackendEntrypoint(BackendEntr
 
     def guess_can_open(
         self,
-        filename_or_obj: str | os.PathLike[Any] | ReadBuffer | AbstractDataStore,
+        filename_or_obj: T_PathFileOrDataStore,
     ) -> bool:
+        filename_or_obj = _normalize_filename_or_obj(filename_or_obj)
         magic_number = try_read_magic_number_from_file_or_path(filename_or_obj)
         if magic_number is not None and magic_number.startswith(b"\x1f\x8b"):
             with gzip.open(filename_or_obj) as f:  # type: ignore[arg-type]
@@ -308,7 +370,7 @@ class ScipyBackendEntrypoint(BackendEntr
 
     def open_dataset(
         self,
-        filename_or_obj: str | os.PathLike[Any] | ReadBuffer | AbstractDataStore,
+        filename_or_obj: T_PathFileOrDataStore,
         *,
         mask_and_scale=True,
         decode_times=True,
@@ -323,7 +385,7 @@ class ScipyBackendEntrypoint(BackendEntr
         mmap=None,
         lock=None,
     ) -> Dataset:
-        filename_or_obj = _normalize_path(filename_or_obj)
+        filename_or_obj = _normalize_filename_or_obj(filename_or_obj)
         store = ScipyDataStore(
             filename_or_obj, mode=mode, format=format, group=group, mmap=mmap, lock=lock
         )
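
A minimal sketch of what the bytes/memoryview handling above enables (assuming scipy is installed; the memoryview return of `to_netcdf` follows the overloads added in writers.py later in this diff):

import xarray as xr

ds = xr.Dataset({"a": ("x", [1, 2, 3])})
buf = ds.to_netcdf(engine="scipy")  # no path given -> in-memory buffer (memoryview)
# raw bytes or memoryview inputs are wrapped in io.BytesIO by _normalize_filename_or_obj
roundtripped = xr.open_dataset(buf, engine="scipy")
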
diff -pruN 2025.03.1-8/xarray/backends/store.py 2025.10.1-1/xarray/backends/store.py
--- 2025.03.1-8/xarray/backends/store.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/backends/store.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,41 +1,39 @@
 from __future__ import annotations
 
 from collections.abc import Iterable
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING
 
 from xarray import conventions
 from xarray.backends.common import (
     BACKEND_ENTRYPOINTS,
     AbstractDataStore,
     BackendEntrypoint,
+    T_PathFileOrDataStore,
 )
+from xarray.core.coordinates import Coordinates
 from xarray.core.dataset import Dataset
 
 if TYPE_CHECKING:
-    import os
-
-    from xarray.core.types import ReadBuffer
+    pass
 
 
 class StoreBackendEntrypoint(BackendEntrypoint):
     description = "Open AbstractDataStore instances in Xarray"
     url = "https://docs.xarray.dev/en/stable/generated/xarray.backends.StoreBackendEntrypoint.html"
 
-    def guess_can_open(
-        self,
-        filename_or_obj: str | os.PathLike[Any] | ReadBuffer | AbstractDataStore,
-    ) -> bool:
+    def guess_can_open(self, filename_or_obj: T_PathFileOrDataStore) -> bool:
         return isinstance(filename_or_obj, AbstractDataStore)
 
     def open_dataset(
         self,
-        filename_or_obj: str | os.PathLike[Any] | ReadBuffer | AbstractDataStore,
+        filename_or_obj: T_PathFileOrDataStore,
         *,
         mask_and_scale=True,
         decode_times=True,
         concat_characters=True,
         decode_coords=True,
         drop_variables: str | Iterable[str] | None = None,
+        set_indexes: bool = True,
         use_cftime=None,
         decode_timedelta=None,
     ) -> Dataset:
@@ -56,8 +54,19 @@ class StoreBackendEntrypoint(BackendEntr
             decode_timedelta=decode_timedelta,
         )
 
-        ds = Dataset(vars, attrs=attrs)
-        ds = ds.set_coords(coord_names.intersection(vars))
+        # split data and coordinate variables (promote dimension coordinates)
+        data_vars = {}
+        coord_vars = {}
+        for name, var in vars.items():
+            if name in coord_names or var.dims == (name,):
+                coord_vars[name] = var
+            else:
+                data_vars[name] = var
+
+        # explicit Coordinates object with no index passed
+        coords = Coordinates(coord_vars, indexes={})
+
+        ds = Dataset(data_vars, coords=coords, attrs=attrs)
         ds.set_close(filename_or_obj.close)
         ds.encoding = encoding
 
diff -pruN 2025.03.1-8/xarray/backends/writers.py 2025.10.1-1/xarray/backends/writers.py
--- 2025.03.1-8/xarray/backends/writers.py	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/xarray/backends/writers.py	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,1034 @@
+from __future__ import annotations
+
+import importlib
+import io
+import os
+from collections.abc import Callable, Hashable, Iterable, Mapping, MutableMapping
+from io import IOBase
+from itertools import starmap
+from numbers import Number
+from os import PathLike
+from typing import TYPE_CHECKING, Any, Literal, get_args, overload
+
+import numpy as np
+
+from xarray import backends, conventions
+from xarray.backends.api import (
+    _normalize_path,
+    delayed_close_after_writes,
+)
+from xarray.backends.common import AbstractWritableDataStore, ArrayWriter, BytesIOProxy
+from xarray.backends.locks import get_dask_scheduler
+from xarray.backends.store import AbstractDataStore
+from xarray.core.dataset import Dataset
+from xarray.core.datatree import DataTree
+from xarray.core.options import OPTIONS
+from xarray.core.types import NetcdfWriteModes, ZarrWriteModes
+from xarray.core.utils import emit_user_level_warning
+
+if TYPE_CHECKING:
+    from dask.delayed import Delayed
+
+    from xarray.backends import ZarrStore
+    from xarray.backends.api import T_NetcdfEngine, T_NetcdfTypes
+    from xarray.core.types import ZarrStoreLike
+
+
+T_DataTreeNetcdfEngine = Literal["netcdf4", "h5netcdf", "pydap"]
+T_DataTreeNetcdfTypes = Literal["NETCDF4"]
+
+
+WRITEABLE_STORES: dict[T_NetcdfEngine, Callable] = {
+    "netcdf4": backends.NetCDF4DataStore.open,
+    "scipy": backends.ScipyDataStore,
+    "h5netcdf": backends.H5NetCDFStore.open,
+}
+
+
+def get_writable_netcdf_store(
+    target,
+    engine: T_NetcdfEngine,
+    *,
+    format: T_NetcdfTypes | None,
+    mode: NetcdfWriteModes,
+    autoclose: bool,
+    invalid_netcdf: bool,
+    auto_complex: bool | None,
+) -> AbstractWritableDataStore:
+    """Create a store for writing to a netCDF file."""
+    try:
+        store_open = WRITEABLE_STORES[engine]
+    except KeyError as err:
+        raise ValueError(f"unrecognized engine for to_netcdf: {engine!r}") from err
+
+    if format is not None:
+        format = format.upper()  # type: ignore[assignment]
+
+    kwargs = dict(autoclose=True) if autoclose else {}
+    if invalid_netcdf:
+        if engine == "h5netcdf":
+            kwargs["invalid_netcdf"] = invalid_netcdf
+        else:
+            raise ValueError(
+                f"unrecognized option 'invalid_netcdf' for engine {engine}"
+            )
+    if auto_complex is not None:
+        kwargs["auto_complex"] = auto_complex
+
+    return store_open(target, mode=mode, format=format, **kwargs)
+
+
+def _validate_dataset_names(dataset: Dataset) -> None:
+    """DataArray.name and Dataset keys must be a string or None"""
+
+    def check_name(name: Hashable):
+        if isinstance(name, str):
+            if not name:
+                raise ValueError(
+                    f"Invalid name {name!r} for DataArray or Dataset key: "
+                    "string must be length 1 or greater for "
+                    "serialization to netCDF or zarr files"
+                )
+        elif name is not None:
+            raise TypeError(
+                f"Invalid name {name!r} for DataArray or Dataset key: "
+                "must be either a string or None for serialization to netCDF "
+                "or zarr files"
+            )
+
+    for k in dataset.variables:
+        check_name(k)
+
+
+def _validate_attrs(dataset, engine, invalid_netcdf=False):
+    """`attrs` must have a string key and a value which is either: a number,
+    a string, an ndarray, a list/tuple of numbers/strings, or a numpy.bool_.
+
+    Notes
+    -----
+    A numpy.bool_ is only allowed when using the h5netcdf engine with
+    `invalid_netcdf=True`.
+    """
+
+    valid_types = (str, Number, np.ndarray, np.number, list, tuple, bytes)
+    if invalid_netcdf and engine == "h5netcdf":
+        valid_types += (np.bool_,)
+
+    def check_attr(name, value, valid_types):
+        if isinstance(name, str):
+            if not name:
+                raise ValueError(
+                    f"Invalid name for attr {name!r}: string must be "
+                    "length 1 or greater for serialization to "
+                    "netCDF files"
+                )
+        else:
+            raise TypeError(
+                f"Invalid name for attr: {name!r} must be a string for "
+                "serialization to netCDF files"
+            )
+
+        if not isinstance(value, valid_types):
+            raise TypeError(
+                f"Invalid value for attr {name!r}: {value!r}. For serialization to "
+                "netCDF files, its value must be of one of the following types: "
+                f"{', '.join([vtype.__name__ for vtype in valid_types])}"
+            )
+
+        if isinstance(value, bytes) and engine == "h5netcdf":
+            try:
+                value.decode("utf-8")
+            except UnicodeDecodeError as e:
+                raise ValueError(
+                    f"Invalid value provided for attribute '{name!r}': {value!r}. "
+                    "Only binary data derived from UTF-8 encoded strings is allowed "
+                    f"for the '{engine}' engine. Consider using the 'netcdf4' engine."
+                ) from e
+
+            if b"\x00" in value:
+                raise ValueError(
+                    f"Invalid value provided for attribute '{name!r}': {value!r}. "
+                    f"Null characters are not permitted for the '{engine}' engine. "
+                    "Consider using the 'netcdf4' engine."
+                )
+
+    # Check attrs on the dataset itself
+    for k, v in dataset.attrs.items():
+        check_attr(k, v, valid_types)
+
+    # Check attrs on each variable within the dataset
+    for variable in dataset.variables.values():
+        for k, v in variable.attrs.items():
+            check_attr(k, v, valid_types)
+
+
+def get_default_netcdf_write_engine(
+    path_or_file: str | IOBase | None,
+    format: T_NetcdfTypes | None,
+) -> Literal["netcdf4", "h5netcdf", "scipy"]:
+    """Return the default netCDF library to use for writing a netCDF file."""
+
+    module_names = {
+        "netcdf4": "netCDF4",
+        "scipy": "scipy",
+        "h5netcdf": "h5netcdf",
+    }
+    candidates = list(OPTIONS["netcdf_engine_order"])
+
+    if format is not None:
+        format = format.upper()  # type: ignore[assignment]
+        if format not in {
+            "NETCDF4",
+            "NETCDF4_CLASSIC",
+            "NETCDF3_64BIT",
+            "NETCDF3_CLASSIC",
+        }:
+            raise ValueError(f"unexpected {format=}")
+        # TODO: allow format='NETCDF4_CLASSIC' to default to using h5netcdf,
+        # when the oldest supported version of h5netcdf supports it:
+        # https://github.com/h5netcdf/h5netcdf/pull/283
+        if format != "NETCDF4":
+            candidates.remove("h5netcdf")
+        if format not in {"NETCDF3_64BIT", "NETCDF3_CLASSIC"}:
+            candidates.remove("scipy")
+
+    nczarr_mode = isinstance(path_or_file, str) and path_or_file.endswith(
+        "#mode=nczarr"
+    )
+    if nczarr_mode:
+        candidates[:] = ["netcdf4"]
+
+    if isinstance(path_or_file, IOBase):
+        candidates.remove("netcdf4")
+
+    for engine in candidates:
+        module_name = module_names[engine]
+        if importlib.util.find_spec(module_name) is not None:
+            return engine
+
+    if nczarr_mode:
+        format_str = " in NCZarr format"
+    else:
+        format_str = f" with {format=}" if format is not None else ""
+    libraries = ", ".join(module_names[c] for c in candidates)
+    raise ValueError(
+        f"cannot write NetCDF files{format_str} because none of the suitable "
+        f"backend libraries ({libraries}) are installed"
+    )
+
+
+def _sanitize_unlimited_dims(dataset, unlimited_dims):
+    msg_origin = "unlimited_dims-kwarg"
+    if unlimited_dims is None:
+        unlimited_dims = dataset.encoding.get("unlimited_dims", None)
+        msg_origin = "dataset.encoding"
+    if unlimited_dims is not None:
+        if isinstance(unlimited_dims, str) or not isinstance(unlimited_dims, Iterable):
+            unlimited_dims = [unlimited_dims]
+        else:
+            unlimited_dims = list(unlimited_dims)
+        dataset_dims = set(dataset.dims)
+        unlimited_dims = set(unlimited_dims)
+        if undeclared_dims := (unlimited_dims - dataset_dims):
+            msg = (
+                f"Unlimited dimension(s) {undeclared_dims!r} declared in {msg_origin!r}, "
+                f"but not part of current dataset dimensions. "
+                f"Consider removing {undeclared_dims!r} from {msg_origin!r}."
+            )
+            if msg_origin == "unlimited_dims-kwarg":
+                raise ValueError(msg)
+            else:
+                emit_user_level_warning(msg)
+        return unlimited_dims
+
+
+# multifile=True returns writer and datastore
+@overload
+def to_netcdf(
+    dataset: Dataset,
+    path_or_file: str | os.PathLike | None = None,
+    mode: NetcdfWriteModes = "w",
+    format: T_NetcdfTypes | None = None,
+    group: str | None = None,
+    engine: T_NetcdfEngine | None = None,
+    encoding: Mapping[Hashable, Mapping[str, Any]] | None = None,
+    unlimited_dims: Iterable[Hashable] | None = None,
+    compute: bool = True,
+    *,
+    multifile: Literal[True],
+    invalid_netcdf: bool = False,
+    auto_complex: bool | None = None,
+) -> tuple[ArrayWriter, AbstractDataStore]: ...
+
+
+# path=None writes to bytes or memoryview, depending on store
+@overload
+def to_netcdf(
+    dataset: Dataset,
+    path_or_file: None = None,
+    mode: NetcdfWriteModes = "w",
+    format: T_NetcdfTypes | None = None,
+    group: str | None = None,
+    engine: T_NetcdfEngine | None = None,
+    encoding: Mapping[Hashable, Mapping[str, Any]] | None = None,
+    unlimited_dims: Iterable[Hashable] | None = None,
+    compute: bool = True,
+    multifile: Literal[False] = False,
+    invalid_netcdf: bool = False,
+    auto_complex: bool | None = None,
+) -> memoryview: ...
+
+
+# compute=False returns dask.Delayed
+@overload
+def to_netcdf(
+    dataset: Dataset,
+    path_or_file: str | os.PathLike,
+    mode: NetcdfWriteModes = "w",
+    format: T_NetcdfTypes | None = None,
+    group: str | None = None,
+    engine: T_NetcdfEngine | None = None,
+    encoding: Mapping[Hashable, Mapping[str, Any]] | None = None,
+    unlimited_dims: Iterable[Hashable] | None = None,
+    *,
+    compute: Literal[False],
+    multifile: Literal[False] = False,
+    invalid_netcdf: bool = False,
+    auto_complex: bool | None = None,
+) -> Delayed: ...
+
+
+# default return None
+@overload
+def to_netcdf(
+    dataset: Dataset,
+    path_or_file: str | os.PathLike | IOBase,
+    mode: NetcdfWriteModes = "w",
+    format: T_NetcdfTypes | None = None,
+    group: str | None = None,
+    engine: T_NetcdfEngine | None = None,
+    encoding: Mapping[Hashable, Mapping[str, Any]] | None = None,
+    unlimited_dims: Iterable[Hashable] | None = None,
+    compute: Literal[True] = True,
+    multifile: Literal[False] = False,
+    invalid_netcdf: bool = False,
+    auto_complex: bool | None = None,
+) -> None: ...
+
+
+# if compute cannot be evaluated at type check time
+# we may get back either Delayed or None
+@overload
+def to_netcdf(
+    dataset: Dataset,
+    path_or_file: str | os.PathLike,
+    mode: NetcdfWriteModes = "w",
+    format: T_NetcdfTypes | None = None,
+    group: str | None = None,
+    engine: T_NetcdfEngine | None = None,
+    encoding: Mapping[Hashable, Mapping[str, Any]] | None = None,
+    unlimited_dims: Iterable[Hashable] | None = None,
+    compute: bool = False,
+    multifile: Literal[False] = False,
+    invalid_netcdf: bool = False,
+    auto_complex: bool | None = None,
+) -> Delayed | None: ...
+
+
+# if multifile cannot be evaluated at type check time
+# we may get back either writer and datastore or Delayed or None
+@overload
+def to_netcdf(
+    dataset: Dataset,
+    path_or_file: str | os.PathLike,
+    mode: NetcdfWriteModes = "w",
+    format: T_NetcdfTypes | None = None,
+    group: str | None = None,
+    engine: T_NetcdfEngine | None = None,
+    encoding: Mapping[Hashable, Mapping[str, Any]] | None = None,
+    unlimited_dims: Iterable[Hashable] | None = None,
+    compute: bool = False,
+    multifile: bool = False,
+    invalid_netcdf: bool = False,
+    auto_complex: bool | None = None,
+) -> tuple[ArrayWriter, AbstractDataStore] | Delayed | None: ...
+
+
+# Any
+@overload
+def to_netcdf(
+    dataset: Dataset,
+    path_or_file: str | os.PathLike | IOBase | None,
+    mode: NetcdfWriteModes = "w",
+    format: T_NetcdfTypes | None = None,
+    group: str | None = None,
+    engine: T_NetcdfEngine | None = None,
+    encoding: Mapping[Hashable, Mapping[str, Any]] | None = None,
+    unlimited_dims: Iterable[Hashable] | None = None,
+    compute: bool = False,
+    multifile: bool = False,
+    invalid_netcdf: bool = False,
+    auto_complex: bool | None = None,
+) -> tuple[ArrayWriter, AbstractDataStore] | memoryview | Delayed | None: ...
+
+
+def to_netcdf(
+    dataset: Dataset,
+    path_or_file: str | os.PathLike | IOBase | None = None,
+    mode: NetcdfWriteModes = "w",
+    format: T_NetcdfTypes | None = None,
+    group: str | None = None,
+    engine: T_NetcdfEngine | None = None,
+    encoding: Mapping[Hashable, Mapping[str, Any]] | None = None,
+    unlimited_dims: Iterable[Hashable] | None = None,
+    compute: bool = True,
+    multifile: bool = False,
+    invalid_netcdf: bool = False,
+    auto_complex: bool | None = None,
+) -> tuple[ArrayWriter, AbstractDataStore] | memoryview | Delayed | None:
+    """This function creates an appropriate datastore for writing a dataset to
+    disk as a netCDF file.
+
+    See `Dataset.to_netcdf` for full API docs.
+
+    The ``multifile`` argument is only for the private use of save_mfdataset.
+    """
+    if encoding is None:
+        encoding = {}
+
+    normalized_path = _normalize_path(path_or_file)
+
+    if engine is None:
+        engine = get_default_netcdf_write_engine(normalized_path, format)
+
+    # validate Dataset keys, DataArray names, and attr keys/values
+    _validate_dataset_names(dataset)
+    _validate_attrs(dataset, engine, invalid_netcdf)
+    # sanitize unlimited_dims
+    unlimited_dims = _sanitize_unlimited_dims(dataset, unlimited_dims)
+
+    autoclose = _get_netcdf_autoclose(dataset, engine)
+
+    if normalized_path is None:
+        if not compute:
+            raise NotImplementedError(
+                "to_netcdf() with compute=False is not yet implemented when "
+                "returning a memoryview"
+            )
+        target = BytesIOProxy()
+    else:
+        target = normalized_path  # type: ignore[assignment]
+
+    store = get_writable_netcdf_store(
+        target,
+        engine,
+        mode=mode,
+        format=format,
+        autoclose=autoclose,
+        invalid_netcdf=invalid_netcdf,
+        auto_complex=auto_complex,
+    )
+    if group is not None:
+        store = store.get_child_store(group)
+
+    writer = ArrayWriter()
+
+    # TODO: figure out how to refactor this logic (here and in save_mfdataset)
+    # to avoid this mess of conditionals
+    try:
+        # TODO: allow this work (setting up the file for writing array data)
+        # to be parallelized with dask
+        dump_to_store(
+            dataset, store, writer, encoding=encoding, unlimited_dims=unlimited_dims
+        )
+        if autoclose:
+            store.close()
+
+        if multifile:
+            return writer, store
+
+        writes = writer.sync(compute=compute)
+
+    finally:
+        if not multifile and not autoclose:  # type: ignore[redundant-expr,unused-ignore]
+            if compute:
+                store.close()
+            else:
+                store.sync()
+
+    if path_or_file is None:
+        assert isinstance(target, BytesIOProxy)  # created in this function
+        return target.getbuffer()
+
+    if not compute:
+        return delayed_close_after_writes(writes, store)
+
+    return None
+
+
+def dump_to_store(
+    dataset, store, writer=None, encoder=None, encoding=None, unlimited_dims=None
+):
+    """Store dataset contents to a backends.*DataStore object."""
+    if writer is None:
+        writer = ArrayWriter()
+
+    if encoding is None:
+        encoding = {}
+
+    variables, attrs = conventions.encode_dataset_coordinates(dataset)
+
+    check_encoding = set()
+    for k, enc in encoding.items():
+        # no need to shallow copy the variable again; that already happened
+        # in encode_dataset_coordinates
+        variables[k].encoding = enc
+        check_encoding.add(k)
+
+    if encoder:
+        variables, attrs = encoder(variables, attrs)
+
+    store.store(variables, attrs, check_encoding, writer, unlimited_dims=unlimited_dims)
+
+
+def save_mfdataset(
+    datasets,
+    paths,
+    mode="w",
+    format=None,
+    groups=None,
+    engine=None,
+    compute=True,
+    **kwargs,
+):
+    """Write multiple datasets to disk as netCDF files simultaneously.
+
+    This function is intended for use with datasets consisting of dask.array
+    objects, in which case it can write the multiple datasets to disk
+    simultaneously using a shared thread pool.
+
+    When not using dask, it is no different than calling ``to_netcdf``
+    repeatedly.
+
+    Parameters
+    ----------
+    datasets : list of Dataset
+        List of datasets to save.
+    paths : list of str or list of path-like objects
+        List of paths to which to save each corresponding dataset.
+    mode : {"w", "a"}, optional
+        Write ("w") or append ("a") mode. If mode="w", any existing file at
+        these locations will be overwritten.
+    format : {"NETCDF4", "NETCDF4_CLASSIC", "NETCDF3_64BIT", \
+              "NETCDF3_CLASSIC"}, optional
+        File format for the resulting netCDF file:
+
+        * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
+          features.
+        * NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
+          netCDF 3 compatible API features.
+        * NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
+          which fully supports 2+ GB files, but is only compatible with
+          clients linked against netCDF version 3.6.0 or later.
+        * NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
+          handle 2+ GB files very well.
+
+        All formats are supported by the netCDF4-python library.
+        scipy.io.netcdf only supports the last two formats.
+
+        The default format is NETCDF4 if you are saving a file to disk and
+        have the netCDF4-python library available. Otherwise, xarray falls
+        back to using scipy to write netCDF files and defaults to the
+        NETCDF3_64BIT format (scipy does not support netCDF4).
+    groups : list of str, optional
+        Paths to the netCDF4 group in each corresponding file to which to save
+        datasets (only works for format="NETCDF4"). The groups will be created
+        if necessary.
+    engine : {"netcdf4", "h5netcdf", "scipy"}, optional
+        Engine to use when writing netCDF files. If not provided, the
+        default engine is chosen based on available dependencies, by default
+        preferring "netcdf4" over "h5netcdf" over "scipy" (customizable via
+        ``netcdf_engine_order`` in ``xarray.set_options()``).
+    compute : bool
+        If true compute immediately, otherwise return a
+        ``dask.delayed.Delayed`` object that can be computed later.
+    **kwargs : dict, optional
+        Additional arguments are passed along to ``to_netcdf``.
+
+    Examples
+    --------
+    Save a dataset into one netCDF per year of data:
+
+    >>> ds = xr.Dataset(
+    ...     {"a": ("time", np.linspace(0, 1, 48))},
+    ...     coords={"time": pd.date_range("2010-01-01", freq="ME", periods=48)},
+    ... )
+    >>> ds
+    <xarray.Dataset> Size: 768B
+    Dimensions:  (time: 48)
+    Coordinates:
+      * time     (time) datetime64[ns] 384B 2010-01-31 2010-02-28 ... 2013-12-31
+    Data variables:
+        a        (time) float64 384B 0.0 0.02128 0.04255 ... 0.9574 0.9787 1.0
+    >>> years, datasets = zip(*ds.groupby("time.year"))
+    >>> paths = [f"{y}.nc" for y in years]
+    >>> xr.save_mfdataset(datasets, paths)
+    """
+    if mode == "w" and len(set(paths)) < len(paths):
+        raise ValueError(
+            "cannot use mode='w' when writing multiple datasets to the same path"
+        )
+
+    for obj in datasets:
+        if not isinstance(obj, Dataset):
+            raise TypeError(
+                "save_mfdataset only supports writing Dataset "
+                f"objects, received type {type(obj)}"
+            )
+
+    if groups is None:
+        groups = [None] * len(datasets)
+
+    if len({len(datasets), len(paths), len(groups)}) > 1:
+        raise ValueError(
+            "must supply lists of the same length for the "
+            "datasets, paths and groups arguments to "
+            "save_mfdataset"
+        )
+
+    writers, stores = zip(
+        *[
+            to_netcdf(
+                ds,
+                path,
+                mode,
+                format,
+                group,
+                engine,
+                compute=compute,
+                multifile=True,
+                **kwargs,
+            )
+            for ds, path, group in zip(datasets, paths, groups, strict=True)
+        ],
+        strict=True,
+    )
+
+    try:
+        writes = [w.sync(compute=compute) for w in writers]
+    finally:
+        for store in stores:
+            if compute:
+                store.close()
+            else:
+                store.sync()
+
+    if not compute:
+        import dask
+
+        return dask.delayed(
+            list(starmap(delayed_close_after_writes, zip(writes, stores, strict=True)))
+        )
+
+
+def get_writable_zarr_store(
+    store: ZarrStoreLike | None = None,
+    *,
+    chunk_store: MutableMapping | str | os.PathLike | None = None,
+    mode: ZarrWriteModes | None = None,
+    synchronizer=None,
+    group: str | None = None,
+    consolidated: bool | None = None,
+    append_dim: Hashable | None = None,
+    region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None,
+    safe_chunks: bool = True,
+    align_chunks: bool = False,
+    storage_options: dict[str, str] | None = None,
+    zarr_version: int | None = None,
+    zarr_format: int | None = None,
+    write_empty_chunks: bool | None = None,
+) -> backends.ZarrStore:
+    """Create a store for writing to Zarr."""
+    from xarray.backends.zarr import _choose_default_mode, _get_mappers
+
+    kwargs, mapper, chunk_mapper = _get_mappers(
+        storage_options=storage_options, store=store, chunk_store=chunk_store
+    )
+    mode = _choose_default_mode(mode=mode, append_dim=append_dim, region=region)
+
+    if mode == "r+":
+        already_consolidated = consolidated
+        consolidate_on_close = False
+    else:
+        already_consolidated = False
+        consolidate_on_close = consolidated or consolidated is None
+
+    return backends.ZarrStore.open_group(
+        store=mapper,
+        mode=mode,
+        synchronizer=synchronizer,
+        group=group,
+        consolidated=already_consolidated,
+        consolidate_on_close=consolidate_on_close,
+        chunk_store=chunk_mapper,
+        append_dim=append_dim,
+        write_region=region,
+        safe_chunks=safe_chunks,
+        align_chunks=align_chunks,
+        zarr_version=zarr_version,
+        zarr_format=zarr_format,
+        write_empty=write_empty_chunks,
+        **kwargs,
+    )
+
+
+# compute=True returns ZarrStore
+@overload
+def to_zarr(
+    dataset: Dataset,
+    store: ZarrStoreLike | None = None,
+    chunk_store: MutableMapping | str | os.PathLike | None = None,
+    mode: ZarrWriteModes | None = None,
+    synchronizer=None,
+    group: str | None = None,
+    encoding: Mapping | None = None,
+    *,
+    compute: Literal[True] = True,
+    consolidated: bool | None = None,
+    append_dim: Hashable | None = None,
+    region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None,
+    safe_chunks: bool = True,
+    align_chunks: bool = False,
+    storage_options: dict[str, str] | None = None,
+    zarr_version: int | None = None,
+    write_empty_chunks: bool | None = None,
+    chunkmanager_store_kwargs: dict[str, Any] | None = None,
+) -> backends.ZarrStore: ...
+
+
+# compute=False returns dask.Delayed
+@overload
+def to_zarr(
+    dataset: Dataset,
+    store: ZarrStoreLike | None = None,
+    chunk_store: MutableMapping | str | os.PathLike | None = None,
+    mode: ZarrWriteModes | None = None,
+    synchronizer=None,
+    group: str | None = None,
+    encoding: Mapping | None = None,
+    *,
+    compute: Literal[False],
+    consolidated: bool | None = None,
+    append_dim: Hashable | None = None,
+    region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None,
+    safe_chunks: bool = True,
+    align_chunks: bool = False,
+    storage_options: dict[str, str] | None = None,
+    zarr_version: int | None = None,
+    write_empty_chunks: bool | None = None,
+    chunkmanager_store_kwargs: dict[str, Any] | None = None,
+) -> Delayed: ...
+
+
+def to_zarr(
+    dataset: Dataset,
+    store: ZarrStoreLike | None = None,
+    chunk_store: MutableMapping | str | os.PathLike | None = None,
+    mode: ZarrWriteModes | None = None,
+    synchronizer=None,
+    group: str | None = None,
+    encoding: Mapping | None = None,
+    *,
+    compute: bool = True,
+    consolidated: bool | None = None,
+    append_dim: Hashable | None = None,
+    region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None,
+    safe_chunks: bool = True,
+    align_chunks: bool = False,
+    storage_options: dict[str, str] | None = None,
+    zarr_version: int | None = None,
+    zarr_format: int | None = None,
+    write_empty_chunks: bool | None = None,
+    chunkmanager_store_kwargs: dict[str, Any] | None = None,
+) -> backends.ZarrStore | Delayed:
+    """This function creates an appropriate datastore for writing a dataset to
+    a zarr store.
+
+    See `Dataset.to_zarr` for full API docs.
+    """
+
+    # validate Dataset keys, DataArray names
+    _validate_dataset_names(dataset)
+
+    # Load empty arrays to avoid bug saving zero length dimensions (Issue #5741)
+    # TODO: delete when min dask>=2023.12.1
+    # https://github.com/dask/dask/pull/10506
+    for v in dataset.variables.values():
+        if v.size == 0:
+            v.load()
+
+    if encoding is None:
+        encoding = {}
+
+    zstore = get_writable_zarr_store(
+        store,
+        chunk_store=chunk_store,
+        mode=mode,
+        synchronizer=synchronizer,
+        group=group,
+        consolidated=consolidated,
+        append_dim=append_dim,
+        region=region,
+        safe_chunks=safe_chunks,
+        align_chunks=align_chunks,
+        storage_options=storage_options,
+        zarr_version=zarr_version,
+        zarr_format=zarr_format,
+        write_empty_chunks=write_empty_chunks,
+    )
+
+    dataset = zstore._validate_and_autodetect_region(dataset)
+    zstore._validate_encoding(encoding)
+
+    writer = ArrayWriter()
+
+    # TODO: figure out how to properly handle unlimited_dims
+    try:
+        dump_to_store(dataset, zstore, writer, encoding=encoding)
+        writes = writer.sync(
+            compute=compute, chunkmanager_store_kwargs=chunkmanager_store_kwargs
+        )
+    finally:
+        if compute:
+            zstore.close()
+
+    if not compute:
+        return delayed_close_after_writes(writes, zstore)
+
+    return zstore
+
+
+def _datatree_to_netcdf(
+    dt: DataTree,
+    filepath: str | PathLike | io.IOBase | None = None,
+    mode: NetcdfWriteModes = "w",
+    encoding: Mapping[str, Any] | None = None,
+    unlimited_dims: Mapping | None = None,
+    format: T_DataTreeNetcdfTypes | None = None,
+    engine: T_DataTreeNetcdfEngine | None = None,
+    group: str | None = None,
+    write_inherited_coords: bool = False,
+    compute: bool = True,
+    invalid_netcdf: bool = False,
+    auto_complex: bool | None = None,
+) -> None | memoryview | Delayed:
+    """Implementation of `DataTree.to_netcdf`."""
+
+    if format not in [None, *get_args(T_DataTreeNetcdfTypes)]:
+        raise ValueError("DataTree.to_netcdf only supports the NETCDF4 format")
+
+    if engine not in [None, *get_args(T_DataTreeNetcdfEngine)]:
+        raise ValueError(
+            "DataTree.to_netcdf only supports the netcdf4 and h5netcdf engines"
+        )
+
+    normalized_path = _normalize_path(filepath)
+
+    if engine is None:
+        engine = get_default_netcdf_write_engine(
+            path_or_file=normalized_path,
+            format="NETCDF4",  # required for supporting groups
+        )  # type: ignore[assignment]
+
+    if group is not None:
+        raise NotImplementedError(
+            "specifying a root group for the tree has not been implemented"
+        )
+
+    if encoding is None:
+        encoding = {}
+
+    # In the future, we may want to expand this check to ensure all the provided encoding
+    # options are valid. For now, this simply checks that all provided encoding keys are
+    # groups in the datatree.
+    if set(encoding) - set(dt.groups):
+        raise ValueError(
+            f"unexpected encoding group name(s) provided: {set(encoding) - set(dt.groups)}"
+        )
+
+    if normalized_path is None:
+        if not compute:
+            raise NotImplementedError(
+                "to_netcdf() with compute=False is not yet implemented when "
+                "returning a memoryview"
+            )
+        target = BytesIOProxy()
+    else:
+        target = normalized_path  # type: ignore[assignment]
+
+    if unlimited_dims is None:
+        unlimited_dims = {}
+
+    scheduler = get_dask_scheduler()
+    have_chunks = any(
+        v.chunks is not None for node in dt.subtree for v in node.variables.values()
+    )
+    autoclose = have_chunks and scheduler in ["distributed", "multiprocessing"]
+
+    root_store = get_writable_netcdf_store(
+        target,
+        engine,  # type: ignore[arg-type]
+        mode=mode,
+        format=format,
+        autoclose=autoclose,
+        invalid_netcdf=invalid_netcdf,
+        auto_complex=auto_complex,
+    )
+
+    writer = ArrayWriter()
+
+    # TODO: allow this work (setting up the file for writing array data)
+    # to be parallelized with dask
+    try:
+        for node in dt.subtree:
+            at_root = node is dt
+            dataset = node.to_dataset(inherit=write_inherited_coords or at_root)
+            node_store = (
+                root_store if at_root else root_store.get_child_store(node.path)
+            )
+            dump_to_store(
+                dataset,
+                node_store,
+                writer,
+                encoding=encoding.get(node.path),
+                unlimited_dims=unlimited_dims.get(node.path),
+            )
+
+        if autoclose:
+            root_store.close()
+
+        writes = writer.sync(compute=compute)
+
+    finally:
+        if compute:
+            root_store.close()
+        else:
+            root_store.sync()
+
+    if filepath is None:
+        assert isinstance(target, BytesIOProxy)  # created in this function
+        return target.getbuffer()
+
+    if not compute:
+        return delayed_close_after_writes(writes, root_store)
+
+    return None
+
+
+def _datatree_to_zarr(
+    dt: DataTree,
+    store: ZarrStoreLike,
+    mode: ZarrWriteModes = "w-",
+    encoding: Mapping[str, Any] | None = None,
+    synchronizer=None,
+    group: str | None = None,
+    write_inherited_coords: bool = False,
+    *,
+    chunk_store: MutableMapping | str | PathLike | None = None,
+    compute: bool = True,
+    consolidated: bool | None = None,
+    append_dim: Hashable | None = None,
+    region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None,
+    safe_chunks: bool = True,
+    align_chunks: bool = False,
+    storage_options: dict[str, str] | None = None,
+    zarr_version: int | None = None,
+    zarr_format: int | None = None,
+    write_empty_chunks: bool | None = None,
+    chunkmanager_store_kwargs: dict[str, Any] | None = None,
+) -> ZarrStore | Delayed:
+    """Implementation of `DataTree.to_zarr`."""
+
+    if group is not None:
+        raise NotImplementedError(
+            "specifying a root group for the tree has not been implemented"
+        )
+
+    if append_dim is not None:
+        raise NotImplementedError(
+            "specifying ``append_dim`` with ``DataTree.to_zarr`` has not been implemented"
+        )
+
+    if encoding is None:
+        encoding = {}
+
+    # In the future, we may want to expand this check to ensure all the provided encoding
+    # options are valid. For now, this simply checks that all provided encoding keys are
+    # groups in the datatree.
+    if set(encoding) - set(dt.groups):
+        raise ValueError(
+            f"unexpected encoding group name(s) provided: {set(encoding) - set(dt.groups)}"
+        )
+
+    root_store = get_writable_zarr_store(
+        store,
+        chunk_store=chunk_store,
+        mode=mode,
+        synchronizer=synchronizer,
+        group=group,
+        consolidated=consolidated,
+        append_dim=append_dim,
+        region=region,
+        safe_chunks=safe_chunks,
+        align_chunks=align_chunks,
+        storage_options=storage_options,
+        zarr_version=zarr_version,
+        zarr_format=zarr_format,
+        write_empty_chunks=write_empty_chunks,
+    )
+
+    writer = ArrayWriter()
+
+    try:
+        for rel_path, node in dt.subtree_with_keys:
+            at_root = node is dt
+            dataset = node.to_dataset(inherit=write_inherited_coords or at_root)
+            # Use a relative path for group, because absolute paths are broken
+            # with consolidated metadata in zarr 3.1.2 and earlier:
+            # https://github.com/zarr-developers/zarr-python/pull/3428
+            node_store = root_store if at_root else root_store.get_child_store(rel_path)
+
+            dataset = node_store._validate_and_autodetect_region(dataset)
+            node_store._validate_encoding(encoding)
+
+            dump_to_store(
+                dataset,
+                node_store,
+                writer,
+                encoding=encoding.get(node.path),
+            )
+        writes = writer.sync(
+            compute=compute, chunkmanager_store_kwargs=chunkmanager_store_kwargs
+        )
+    finally:
+        if compute:
+            root_store.close()
+
+    if not compute:
+        return delayed_close_after_writes(writes, root_store)
+
+    return root_store
+
+
+def _get_netcdf_autoclose(dataset: Dataset, engine: T_NetcdfEngine) -> bool:
+    """Should we close files after each write operations?"""
+    scheduler = get_dask_scheduler()
+    have_chunks = any(v.chunks is not None for v in dataset.variables.values())
+
+    autoclose = have_chunks and scheduler in ["distributed", "multiprocessing"]
+    if autoclose and engine == "scipy":
+        raise NotImplementedError(
+            f"Writing netCDF files with the {engine} backend "
+            f"is not currently supported with dask's {scheduler} scheduler"
+        )
+    return autoclose
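
A minimal sketch of how the default-engine selection in get_default_netcdf_write_engine can be steered (assuming the relevant backends are installed; file names are illustrative):

import xarray as xr

ds = xr.Dataset({"a": ("x", [1.0, 2.0])})

# NETCDF3 formats drop h5netcdf from the candidates, so netcdf4 or scipy is chosen
ds.to_netcdf("classic.nc", format="NETCDF3_CLASSIC")

# the candidate order itself comes from OPTIONS["netcdf_engine_order"],
# customizable via xarray.set_options() per the docstrings above
with xr.set_options(netcdf_engine_order=["h5netcdf", "netcdf4", "scipy"]):
    ds.to_netcdf("default.nc")
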
diff -pruN 2025.03.1-8/xarray/backends/zarr.py 2025.10.1-1/xarray/backends/zarr.py
--- 2025.03.1-8/xarray/backends/zarr.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/backends/zarr.py	2025-10-10 10:38:05.000000000 +0000
@@ -5,17 +5,19 @@ import json
 import os
 import struct
 from collections.abc import Hashable, Iterable, Mapping
-from typing import TYPE_CHECKING, Any, Literal, cast
+from typing import TYPE_CHECKING, Any, Literal, Self, cast
 
 import numpy as np
 import pandas as pd
 
 from xarray import coding, conventions
+from xarray.backends.chunks import grid_rechunk, validate_grid_chunks_alignment
 from xarray.backends.common import (
     BACKEND_ENTRYPOINTS,
     AbstractWritableDataStore,
     BackendArray,
     BackendEntrypoint,
+    T_PathFileOrDataStore,
     _encode_variable_name,
     _normalize_path,
     datatree_from_dict_with_io_cleanup,
@@ -38,10 +40,9 @@ from xarray.namedarray.pycompat import i
 from xarray.namedarray.utils import module_available
 
 if TYPE_CHECKING:
-    from xarray.backends.common import AbstractDataStore
     from xarray.core.dataset import Dataset
     from xarray.core.datatree import DataTree
-    from xarray.core.types import ReadBuffer, ZarrArray, ZarrGroup
+    from xarray.core.types import ZarrArray, ZarrGroup
 
 
 def _get_mappers(*, storage_options, store, chunk_store):
@@ -179,12 +180,23 @@ def encode_zarr_attr_value(value):
     return encoded
 
 
+def has_zarr_async_index() -> bool:
+    try:
+        import zarr
+
+        return hasattr(zarr.AsyncArray, "oindex")
+    except (ImportError, AttributeError):
+        return False
+
+
 class ZarrArrayWrapper(BackendArray):
     __slots__ = ("_array", "dtype", "shape")
 
     def __init__(self, zarr_array):
         # some callers attempt to evaluate an array if an `array` property exists on the object.
         # we prefix with _ to avoid this inference.
+
+        # TODO type hint this?
         self._array = zarr_array
         self.shape = self._array.shape
 
@@ -212,6 +224,33 @@ class ZarrArrayWrapper(BackendArray):
     def _getitem(self, key):
         return self._array[key]
 
+    async def _async_getitem(self, key):
+        if not _zarr_v3():
+            raise NotImplementedError(
+                "For lazy basic async indexing with zarr, zarr-python=>v3.0.0 is required"
+            )
+
+        async_array = self._array._async_array
+        return await async_array.getitem(key)
+
+    async def _async_oindex(self, key):
+        if not has_zarr_async_index():
+            raise NotImplementedError(
+                "For lazy orthogonal async indexing with zarr, zarr-python=>v3.1.2 is required"
+            )
+
+        async_array = self._array._async_array
+        return await async_array.oindex.getitem(key)
+
+    async def _async_vindex(self, key):
+        if not has_zarr_async_index():
+            raise NotImplementedError(
+                "For lazy vectorized async indexing with zarr, zarr-python=>v3.1.2 is required"
+            )
+
+        async_array = self._array._async_array
+        return await async_array.vindex.getitem(key)
+
     def __getitem__(self, key):
         array = self._array
         if isinstance(key, indexing.BasicIndexer):
@@ -227,10 +266,20 @@ class ZarrArrayWrapper(BackendArray):
         # if self.ndim == 0:
         # could possibly have a work-around for 0d data here
 
+    async def async_getitem(self, key):
+        array = self._array
+        if isinstance(key, indexing.BasicIndexer):
+            method = self._async_getitem
+        elif isinstance(key, indexing.VectorizedIndexer):
+            method = self._async_vindex
+        elif isinstance(key, indexing.OuterIndexer):
+            method = self._async_oindex
+        return await indexing.async_explicit_indexing_adapter(
+            key, array.shape, indexing.IndexingSupport.VECTORIZED, method
+        )
 
-def _determine_zarr_chunks(
-    enc_chunks, var_chunks, ndim, name, safe_chunks, region, mode, shape
-):
+
+def _determine_zarr_chunks(enc_chunks, var_chunks, ndim, name):
     """
     Given encoding chunks (possibly None or []) and variable chunks
     (possibly None or []).
@@ -268,7 +317,7 @@ def _determine_zarr_chunks(
         # return the first chunk for each dimension
         return tuple(chunk[0] for chunk in var_chunks)
 
-    # from here on, we are dealing with user-specified chunks in encoding
+    # From here on, we are dealing with user-specified chunks in encoding
     # zarr allows chunks to be an integer, in which case it uses the same chunk
     # size on each dimension.
     # Here we re-implement this expansion ourselves. That makes the logic of
@@ -282,7 +331,10 @@ def _determine_zarr_chunks(
     if len(enc_chunks_tuple) != ndim:
         # throw away encoding chunks, start over
         return _determine_zarr_chunks(
-            None, var_chunks, ndim, name, safe_chunks, region, mode, shape
+            None,
+            var_chunks,
+            ndim,
+            name,
         )
 
     for x in enc_chunks_tuple:
@@ -299,68 +351,6 @@ def _determine_zarr_chunks(
     if not var_chunks:
         return enc_chunks_tuple
 
-    # the hard case
-    # DESIGN CHOICE: do not allow multiple dask chunks on a single zarr chunk
-    # this avoids the need to get involved in zarr synchronization / locking
-    # From zarr docs:
-    #  "If each worker in a parallel computation is writing to a
-    #   separate region of the array, and if region boundaries are perfectly aligned
-    #   with chunk boundaries, then no synchronization is required."
-    # TODO: incorporate synchronizer to allow writes from multiple dask
-    # threads
-
-    # If it is possible to write on partial chunks then it is not necessary to check
-    # the last one contained on the region
-    allow_partial_chunks = mode != "r+"
-
-    base_error = (
-        f"Specified zarr chunks encoding['chunks']={enc_chunks_tuple!r} for "
-        f"variable named {name!r} would overlap multiple dask chunks {var_chunks!r} "
-        f"on the region {region}. "
-        f"Writing this array in parallel with dask could lead to corrupted data. "
-        f"Consider either rechunking using `chunk()`, deleting "
-        f"or modifying `encoding['chunks']`, or specify `safe_chunks=False`."
-    )
-
-    for zchunk, dchunks, interval, size in zip(
-        enc_chunks_tuple, var_chunks, region, shape, strict=True
-    ):
-        if not safe_chunks:
-            continue
-
-        for dchunk in dchunks[1:-1]:
-            if dchunk % zchunk:
-                raise ValueError(base_error)
-
-        region_start = interval.start if interval.start else 0
-
-        if len(dchunks) > 1:
-            # The first border size is the amount of data that needs to be updated on the
-            # first chunk taking into account the region slice.
-            first_border_size = zchunk
-            if allow_partial_chunks:
-                first_border_size = zchunk - region_start % zchunk
-
-            if (dchunks[0] - first_border_size) % zchunk:
-                raise ValueError(base_error)
-
-        if not allow_partial_chunks:
-            region_stop = interval.stop if interval.stop else size
-
-            if region_start % zchunk:
-                # The last chunk which can also be the only one is a partial chunk
-                # if it is not aligned at the beginning
-                raise ValueError(base_error)
-
-            if np.ceil(region_stop / zchunk) == np.ceil(size / zchunk):
-                # If the region is covering the last chunk then check
-                # if the reminder with the default chunk size
-                # is equal to the size of the last chunk
-                if dchunks[-1] % zchunk != size % zchunk:
-                    raise ValueError(base_error)
-            elif dchunks[-1] % zchunk:
-                raise ValueError(base_error)
-
     return enc_chunks_tuple
 
 
@@ -427,10 +417,6 @@ def extract_zarr_variable_encoding(
     name=None,
     *,
     zarr_format: ZarrFormat,
-    safe_chunks=True,
-    region=None,
-    mode=None,
-    shape=None,
 ):
     """
     Extract zarr encoding dictionary from xarray Variable
@@ -440,10 +426,6 @@ def extract_zarr_variable_encoding(
     variable : Variable
     raise_on_invalid : bool, optional
     name: str | Hashable, optional
-    safe_chunks: bool, optional
-    region: tuple[slice, ...], optional
-    mode: str, optional
-    shape: tuple[int, ...], optional
     zarr_format: Literal[2,3]
     Returns
     -------
@@ -451,7 +433,6 @@ def extract_zarr_variable_encoding(
         Zarr encoding for `variable`
     """
 
-    shape = shape if shape else variable.shape
     encoding = variable.encoding.copy()
 
     safe_to_drop = {"source", "original_shape", "preferred_chunks"}
@@ -464,6 +445,7 @@ def extract_zarr_variable_encoding(
         "serializer",
         "cache_metadata",
         "write_empty_chunks",
+        "chunk_key_encoding",
     }
     if zarr_format == 3:
         valid_encodings.add("fill_value")
@@ -493,10 +475,6 @@ def extract_zarr_variable_encoding(
         var_chunks=variable.chunks,
         ndim=variable.ndim,
         name=name,
-        safe_chunks=safe_chunks,
-        region=region,
-        mode=mode,
-        shape=shape,
     )
     if _zarr_v3() and chunks is None:
         chunks = "auto"
@@ -562,7 +540,7 @@ def _validate_datatypes_for_zarr_append(
         # in the dataset, and with dtypes which are not known to be easy-to-append, necessitate
         # exact dtype equality, as checked below.
         pass
-    elif not new_var.dtype == existing_var.dtype:
+    elif new_var.dtype != existing_var.dtype:
         raise ValueError(
             f"Mismatched dtypes for variable {vname} between Zarr store on disk "
             f"and dataset to append. Store has dtype {existing_var.dtype} but "
@@ -621,6 +599,7 @@ class ZarrStore(AbstractWritableDataStor
     """Store for reading and writing data via zarr"""
 
     __slots__ = (
+        "_align_chunks",
         "_append_dim",
         "_cache_members",
         "_close_store_on_close",
@@ -651,6 +630,7 @@ class ZarrStore(AbstractWritableDataStor
         append_dim=None,
         write_region=None,
         safe_chunks=True,
+        align_chunks=False,
         zarr_version=None,
         zarr_format=None,
         use_zarr_fill_value_as_mask=None,
@@ -698,6 +678,7 @@ class ZarrStore(AbstractWritableDataStor
                 write_empty,
                 close_store_on_close,
                 use_zarr_fill_value_as_mask,
+                align_chunks=align_chunks,
                 cache_members=cache_members,
             )
             for group, group_store in group_members.items()
@@ -718,6 +699,7 @@ class ZarrStore(AbstractWritableDataStor
         append_dim=None,
         write_region=None,
         safe_chunks=True,
+        align_chunks=False,
         zarr_version=None,
         zarr_format=None,
         use_zarr_fill_value_as_mask=None,
@@ -753,7 +735,8 @@ class ZarrStore(AbstractWritableDataStor
             write_empty,
             close_store_on_close,
             use_zarr_fill_value_as_mask,
-            cache_members,
+            align_chunks=align_chunks,
+            cache_members=cache_members,
         )
 
     def __init__(
@@ -767,8 +750,13 @@ class ZarrStore(AbstractWritableDataStor
         write_empty: bool | None = None,
         close_store_on_close: bool = False,
         use_zarr_fill_value_as_mask=None,
+        align_chunks: bool = False,
         cache_members: bool = True,
     ):
+        if align_chunks:
+            # Disabled the safe_chunks validations if the alignment is going to be applied
+            safe_chunks = False
+
         self.zarr_group = zarr_group
         self._read_only = self.zarr_group.read_only
         self._synchronizer = self.zarr_group.synchronizer
@@ -777,6 +765,7 @@ class ZarrStore(AbstractWritableDataStor
         self._consolidate_on_close = consolidate_on_close
         self._append_dim = append_dim
         self._write_region = write_region
+        self._align_chunks = align_chunks
         self._safe_chunks = safe_chunks
         self._write_empty = write_empty
         self._close_store_on_close = close_store_on_close
@@ -796,6 +785,22 @@ class ZarrStore(AbstractWritableDataStor
             # on demand.
             self._members = self._fetch_members()
 
+    def get_child_store(self, group: str) -> Self:
+        zarr_group = self.zarr_group.require_group(group)
+        return type(self)(
+            zarr_group=zarr_group,
+            mode=self._mode,
+            consolidate_on_close=self._consolidate_on_close,
+            append_dim=self._append_dim,
+            write_region=self._write_region,
+            safe_chunks=self._safe_chunks,
+            write_empty=self._write_empty,
+            close_store_on_close=self._close_store_on_close,
+            use_zarr_fill_value_as_mask=self._use_zarr_fill_value_as_mask,
+            align_chunks=self._align_chunks,
+            cache_members=self._cache_members,
+        )
+
     @property
     def members(self) -> dict[str, ZarrArray | ZarrGroup]:
         """
@@ -877,9 +882,8 @@ class ZarrStore(AbstractWritableDataStor
             if zarr_array.fill_value is not None:
                 attributes["_FillValue"] = zarr_array.fill_value
         elif "_FillValue" in attributes:
-            original_zarr_dtype = zarr_array.metadata.data_type
             attributes["_FillValue"] = FillValueCoder.decode(
-                attributes["_FillValue"], original_zarr_dtype.value
+                attributes["_FillValue"], zarr_array.dtype
             )
 
         return Variable(dimensions, data, attributes, encoding)
@@ -917,8 +921,8 @@ class ZarrStore(AbstractWritableDataStor
     def set_attributes(self, attributes):
         _put_attrs(self.zarr_group, attributes)
 
-    def encode_variable(self, variable):
-        variable = encode_zarr_variable(variable)
+    def encode_variable(self, variable, name=None):
+        variable = encode_zarr_variable(variable, name=name)
         return variable
 
     def encode_attribute(self, a):
@@ -1058,9 +1062,6 @@ class ZarrStore(AbstractWritableDataStor
                 kwargs["zarr_format"] = self.zarr_group.metadata.zarr_format
             zarr.consolidate_metadata(self.zarr_group.store, **kwargs)
 
-    def sync(self):
-        pass
-
     def _open_existing_array(self, *, name) -> ZarrArray:
         import zarr
         from zarr import Array as ZarrArray
@@ -1139,7 +1140,13 @@ class ZarrStore(AbstractWritableDataStor
         zarr_array = _put_attrs(zarr_array, attrs)
         return zarr_array
 
-    def set_variables(self, variables, check_encoding_set, writer, unlimited_dims=None):
+    def set_variables(
+        self,
+        variables: dict[str, Variable],
+        check_encoding_set,
+        writer,
+        unlimited_dims=None,
+    ):
         """
         This provides a centralized method to set the variables on the data
         store.
@@ -1171,6 +1178,11 @@ class ZarrStore(AbstractWritableDataStor
                 fill_value = attrs.pop("_FillValue", None)
             else:
                 fill_value = v.encoding.pop("fill_value", None)
+                if fill_value is None and v.dtype.kind == "f":
+                    # For floating point data, Xarray defaults to a fill_value
+                    # of NaN (unlike Zarr, which uses zero):
+                    # https://github.com/pydata/xarray/issues/10646
+                    fill_value = np.nan
                 if "_FillValue" in attrs:
                     # replace with encoded fill value
                     fv = attrs.pop("_FillValue")
@@ -1200,8 +1212,11 @@ class ZarrStore(AbstractWritableDataStor
                         zarr_array.shape[append_axis], None
                     )
 
-                    new_shape = list(zarr_array.shape)
-                    new_shape[append_axis] += v.shape[append_axis]
+                    new_shape = (
+                        zarr_array.shape[:append_axis]
+                        + (zarr_array.shape[append_axis] + v.shape[append_axis],)
+                        + zarr_array.shape[append_axis + 1 :]
+                    )
                     zarr_array.resize(new_shape)
 
                 zarr_shape = zarr_array.shape
@@ -1217,13 +1232,36 @@ class ZarrStore(AbstractWritableDataStor
                 v,
                 raise_on_invalid=vn in check_encoding_set,
                 name=vn,
-                safe_chunks=self._safe_chunks,
-                region=region,
-                mode=self._mode,
-                shape=zarr_shape,
                 zarr_format=3 if is_zarr_v3_format else 2,
             )
 
+            if self._align_chunks and isinstance(encoding["chunks"], tuple):
+                v = grid_rechunk(
+                    v=v,
+                    enc_chunks=encoding["chunks"],
+                    region=region,
+                )
+
+            if self._safe_chunks and isinstance(encoding["chunks"], tuple):
+                # the hard case
+                # DESIGN CHOICE: do not allow multiple dask chunks on a single zarr chunk
+                # this avoids the need to get involved in zarr synchronization / locking
+                # From zarr docs:
+                #  "If each worker in a parallel computation is writing to a
+                #   separate region of the array, and if region boundaries are perfectly aligned
+                #   with chunk boundaries, then no synchronization is required."
+                # TODO: incorporate synchronizer to allow writes from multiple dask
+                # threads
+                shape = zarr_shape or v.shape
+                validate_grid_chunks_alignment(
+                    nd_v_chunks=v.chunks,
+                    enc_chunks=encoding["chunks"],
+                    region=region,
+                    allow_partial_chunks=self._mode != "r+",
+                    name=name,
+                    backend_shape=shape,
+                )
+
             if self._mode == "w" or name not in existing_keys:
                 # new variable
                 encoded_attrs = {k: self.encode_attribute(v) for k, v in attrs.items()}
@@ -1233,7 +1271,7 @@ class ZarrStore(AbstractWritableDataStor
                 else:
                     encoded_attrs[DIMENSION_KEY] = dims
 
-                encoding["overwrite"] = True if self._mode == "w" else False
+                encoding["overwrite"] = self._mode == "w"
 
                 zarr_array = self._create_new_array(
                     name=name,
@@ -1246,6 +1284,9 @@ class ZarrStore(AbstractWritableDataStor
 
             writer.add(v.data, zarr_array, region)
 
+    def sync(self) -> None:
+        pass
+
     def close(self) -> None:
         if self._close_store_on_close:
             self.zarr_group.store.close()
@@ -1290,7 +1331,7 @@ class ZarrStore(AbstractWritableDataStor
         region = self._write_region
 
         if region == "auto":
-            region = {dim: "auto" for dim in ds.dims}
+            region = dict.fromkeys(ds.dims, "auto")
 
         if not isinstance(region, dict):
             raise TypeError(f"``region`` must be a dict, got {type(region)}")
@@ -1323,7 +1364,7 @@ class ZarrStore(AbstractWritableDataStor
         non_matching_vars = [
             k for k, v in ds.variables.items() if not set(region).intersection(v.dims)
         ]
-        if non_matching_vars:
+        if region and non_matching_vars:
             raise ValueError(
                 f"when setting `region` explicitly in to_zarr(), all "
                 f"variables in the dataset to write must have at least "
@@ -1377,6 +1418,7 @@ def open_zarr(
     use_zarr_fill_value_as_mask=None,
     chunked_array_type: str | None = None,
     from_array_kwargs: dict[str, Any] | None = None,
+    create_default_indexes=True,
     **kwargs,
 ):
     """Load and decode a dataset from a Zarr store.
@@ -1399,8 +1441,10 @@ def open_zarr(
 
         - ``chunks='auto'`` will use dask ``auto`` chunking taking into account the
           engine preferred chunks.
-        - ``chunks=None`` skips using dask, which is generally faster for
-          small arrays.
+        - ``chunks=None`` skips using dask. This uses xarray's internally private
+          :ref:`lazy indexing classes <internal design.lazy indexing>`,
+          but data is eagerly loaded into memory as numpy arrays when accessed.
+          This can be more efficient for smaller arrays, though results may vary.
         - ``chunks=-1`` loads the data with dask using a single chunk for all arrays.
         - ``chunks={}`` loads the data with dask using engine preferred chunks if
           exposed by the backend, otherwise with a single chunk for all arrays.
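For comparison, the chunking modes listed above can be exercised side by side; a minimal sketch with an assumed store path:

    import xarray as xr

    lazy = xr.open_zarr("example.zarr", chunks=None)    # lazy indexing classes, numpy on access
    auto = xr.open_zarr("example.zarr", chunks="auto")  # dask "auto" chunking
    native = xr.open_zarr("example.zarr", chunks={})    # dask with engine-preferred chunks
    single = xr.open_zarr("example.zarr", chunks=-1)    # dask with a single chunk per array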
@@ -1487,6 +1531,13 @@ def open_zarr(
         chunked arrays, via whichever chunk manager is specified through the ``chunked_array_type`` kwarg.
         Defaults to ``{'manager': 'dask'}``, meaning additional kwargs will be passed eventually to
         :py:func:`dask.array.from_array`. Experimental API that should not be relied upon.
+    create_default_indexes : bool, default: True
+        If True, create pandas indexes for :term:`dimension coordinates <dimension coordinate>`,
+        which loads the coordinate data into memory. Set it to False if you want to avoid loading
+        data into memory.
+
+        Note that backends can still choose to create other indexes. If you want to control that,
+        please refer to the backend's documentation.
 
     Returns
     -------
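A minimal sketch of the new keyword, with an assumed store path; skipping default index creation keeps dimension-coordinate data out of memory until it is explicitly requested:

    import xarray as xr

    ds = xr.open_zarr("example.zarr", create_default_indexes=False)
    print(ds.indexes)  # no pandas indexes were built for dimension coordinates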
@@ -1543,6 +1594,7 @@ def open_zarr(
         engine="zarr",
         chunks=chunks,
         drop_variables=drop_variables,
+        create_default_indexes=create_default_indexes,
         chunked_array_type=chunked_array_type,
         from_array_kwargs=from_array_kwargs,
         backend_kwargs=backend_kwargs,
@@ -1568,20 +1620,20 @@ class ZarrBackendEntrypoint(BackendEntry
 
     description = "Open zarr files (.zarr) using zarr in Xarray"
     url = "https://docs.xarray.dev/en/stable/generated/xarray.backends.ZarrBackendEntrypoint.html"
+    supports_groups = True
 
-    def guess_can_open(
-        self,
-        filename_or_obj: str | os.PathLike[Any] | ReadBuffer | AbstractDataStore,
-    ) -> bool:
+    def guess_can_open(self, filename_or_obj: T_PathFileOrDataStore) -> bool:
         if isinstance(filename_or_obj, str | os.PathLike):
-            _, ext = os.path.splitext(filename_or_obj)
-            return ext in {".zarr"}
+            # allow a trailing slash to account for shell autocompletion
+            # adding one.
+            _, ext = os.path.splitext(str(filename_or_obj).rstrip("/"))
+            return ext in [".zarr"]
 
         return False
 
     def open_dataset(
         self,
-        filename_or_obj: str | os.PathLike[Any] | ReadBuffer | AbstractDataStore,
+        filename_or_obj: T_PathFileOrDataStore,
         *,
         mask_and_scale=True,
         decode_times=True,
@@ -1636,7 +1688,7 @@ class ZarrBackendEntrypoint(BackendEntry
 
     def open_datatree(
         self,
-        filename_or_obj: str | os.PathLike[Any] | ReadBuffer | AbstractDataStore,
+        filename_or_obj: T_PathFileOrDataStore,
         *,
         mask_and_scale=True,
         decode_times=True,
@@ -1678,7 +1730,7 @@ class ZarrBackendEntrypoint(BackendEntry
 
     def open_groups_as_dict(
         self,
-        filename_or_obj: str | os.PathLike[Any] | ReadBuffer | AbstractDataStore,
+        filename_or_obj: T_PathFileOrDataStore,
         *,
         mask_and_scale=True,
         decode_times=True,
@@ -1798,6 +1850,11 @@ def _get_open_params(
     else:
         missing_exc = zarr.errors.GroupNotFoundError
 
+    if _zarr_v3():
+        # zarr 3.0.8 and earlier did not support this property - it was effectively assumed true
+        if not getattr(store, "supports_consolidated_metadata", True):
+            consolidated = consolidate_on_close = False
+
     if consolidated in [None, True]:
         # open the root of the store, in case there is metadata consolidated there
         group = open_kwargs.pop("path")
@@ -1855,6 +1912,7 @@ def _get_open_params(
         else:
             # this was the default for v2 and should apply to most existing Zarr data
             use_zarr_fill_value_as_mask = True
+
     return (
         zarr_group,
         consolidate_on_close,
diff -pruN 2025.03.1-8/xarray/coding/calendar_ops.py 2025.10.1-1/xarray/coding/calendar_ops.py
--- 2025.03.1-8/xarray/coding/calendar_ops.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/coding/calendar_ops.py	2025-10-10 10:38:05.000000000 +0000
@@ -213,7 +213,7 @@ def convert_calendar(
         out[dim] = new_times
 
         # Remove NaN that were put on invalid dates in target calendar
-        out = out.where(out[dim].notnull(), drop=True)
+        out = out.sel({dim: out[dim].notnull()})
 
         if use_cftime:
             # Reassign times to ensure time index of output is a CFTimeIndex
diff -pruN 2025.03.1-8/xarray/coding/cftime_offsets.py 2025.10.1-1/xarray/coding/cftime_offsets.py
--- 2025.03.1-8/xarray/coding/cftime_offsets.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/coding/cftime_offsets.py	2025-10-10 10:38:05.000000000 +0000
@@ -279,9 +279,8 @@ def _adjust_n_years(other, n, month, ref
     if n > 0:
         if other.month < month or (other.month == month and other.day < reference_day):
             n -= 1
-    else:
-        if other.month > month or (other.month == month and other.day > reference_day):
-            n += 1
+    elif other.month > month or (other.month == month and other.day > reference_day):
+        n += 1
     return n
 
 
@@ -290,20 +289,18 @@ def _shift_month(date, months, day_optio
     _ = attempt_import("cftime")
 
     has_year_zero = date.has_year_zero
-    delta_year = (date.month + months) // 12
+    year = date.year + (date.month + months) // 12
     month = (date.month + months) % 12
 
     if month == 0:
         month = 12
-        delta_year = delta_year - 1
+        year -= 1
 
     if not has_year_zero:
-        if date.year < 0 and date.year + delta_year >= 0:
-            delta_year = delta_year + 1
-        elif date.year > 0 and date.year + delta_year <= 0:
-            delta_year = delta_year - 1
-
-    year = date.year + delta_year
+        if date.year < 0 <= year:
+            year += 1
+        elif year <= 0 < date.year:
+            year -= 1
 
     # Silence warnings associated with generating dates with years < 1.
     with warnings.catch_warnings():
@@ -353,12 +350,11 @@ def roll_qtrday(
             # pretend to roll back if on same month but
             # before compare_day
             n -= 1
-    else:
-        if months_since > 0 or (
-            months_since == 0 and other.day > _get_day_of_month(other, day_option)
-        ):
-            # make sure to roll forward, so negate
-            n += 1
+    elif months_since > 0 or (
+        months_since == 0 and other.day > _get_day_of_month(other, day_option)
+    ):
+        # make sure to roll forward, so negate
+        n += 1
     return n
 
 
@@ -613,14 +609,19 @@ class YearEnd(YearOffset):
             return date - YearEnd(month=self.month)
 
 
-class Day(Tick):
-    _freq = "D"
+class Day(BaseCFTimeOffset):
+    """Day offset following definition in pandas/_libs/tslibs/offsets.pyx"""
 
-    def as_timedelta(self) -> timedelta:
-        return timedelta(days=self.n)
+    _freq = "D"
 
     def __apply__(self, other):
-        return other + self.as_timedelta()
+        if isinstance(other, Day):
+            return Day(self.n + other.n)
+        else:
+            return other + timedelta(days=self.n)
+
+    def onOffset(self, date) -> bool:
+        return True
 
 
 class Hour(Tick):
@@ -722,8 +723,8 @@ _PATTERN = rf"^((?P<multiple>[+-]?\d+)|(
 
 
 # pandas defines these offsets as "Tick" objects, which for instance have
-# distinct behavior from monthly or longer frequencies in resample.
-CFTIME_TICKS = (Day, Hour, Minute, Second)
+# distinct behavior from daily or longer frequencies in resample.
+CFTIME_TICKS = (Hour, Minute, Second)
 
 
 def _generate_anchored_deprecated_frequencies(
@@ -805,23 +806,19 @@ def delta_to_tick(delta: timedelta | pd.
             "nanoseconds to 'CFTimeOffset' object"
         )
     if delta.microseconds == 0:
-        if delta.seconds == 0:
-            return Day(n=delta.days)
+        seconds = delta.days * 86400 + delta.seconds
+        if seconds % 3600 == 0:
+            return Hour(n=seconds // 3600)
+        elif seconds % 60 == 0:
+            return Minute(n=seconds // 60)
         else:
-            seconds = delta.days * 86400 + delta.seconds
-            if seconds % 3600 == 0:
-                return Hour(n=seconds // 3600)
-            elif seconds % 60 == 0:
-                return Minute(n=seconds // 60)
-            else:
-                return Second(n=seconds)
+            return Second(n=seconds)
+    # Regardless of the days and seconds this will always be a Millisecond
+    # or Microsecond object
+    elif delta.microseconds % 1_000 == 0:
+        return Millisecond(n=delta.microseconds // 1_000)
     else:
-        # Regardless of the days and seconds this will always be a Millisecond
-        # or Microsecond object
-        if delta.microseconds % 1_000 == 0:
-            return Millisecond(n=delta.microseconds // 1_000)
-        else:
-            return Microsecond(n=delta.microseconds)
+        return Microsecond(n=delta.microseconds)
 
 
 def to_cftime_datetime(date_str_or_date, calendar=None):
@@ -1424,14 +1421,12 @@ def date_range(
     cftime_range
     date_range_like
     """
-    from xarray.coding.times import _is_standard_calendar
-
     if tz is not None:
         use_cftime = False
 
     if _is_standard_calendar(calendar) and use_cftime is not True:
         try:
-            return pd.date_range(
+            return pd.date_range(  # type: ignore[call-overload,unused-ignore]
                 start=start,
                 end=end,
                 periods=periods,
@@ -1615,11 +1610,10 @@ def date_range_like(source, calendar, us
         source_calendar = "standard"
         source_start = default_precision_timestamp(source_start)
         source_end = default_precision_timestamp(source_end)
-    else:
-        if isinstance(source, CFTimeIndex):
-            source_calendar = source.calendar
-        else:  # DataArray
-            source_calendar = source.dt.calendar
+    elif isinstance(source, CFTimeIndex):
+        source_calendar = source.calendar
+    else:  # DataArray
+        source_calendar = source.dt.calendar
 
     if calendar == source_calendar and is_np_datetime_like(source.dtype) ^ use_cftime:
         return source
diff -pruN 2025.03.1-8/xarray/coding/cftimeindex.py 2025.10.1-1/xarray/coding/cftimeindex.py
--- 2025.03.1-8/xarray/coding/cftimeindex.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/coding/cftimeindex.py	2025-10-10 10:38:05.000000000 +0000
@@ -43,7 +43,7 @@ from __future__ import annotations
 
 import math
 from datetime import timedelta
-from typing import TYPE_CHECKING, Any, Optional
+from typing import TYPE_CHECKING, Any
 
 import numpy as np
 import pandas as pd
@@ -456,7 +456,7 @@ class CFTimeIndex(pd.Index):
         """Needed for .loc based partial-string indexing"""
         return self.__contains__(key)
 
-    def shift(  # type: ignore[override]  # freq is typed Any, we are more precise
+    def shift(  # type: ignore[override,unused-ignore]
         self,
         periods: int | float,
         freq: str | timedelta | BaseCFTimeOffset | None = None,
@@ -490,7 +490,7 @@ class CFTimeIndex(pd.Index):
         >>> index.shift(1, "ME")
         CFTimeIndex([2000-02-29 00:00:00],
                     dtype='object', length=1, calendar='standard', freq=None)
-        >>> index.shift(1.5, "D")
+        >>> index.shift(1.5, "24h")
         CFTimeIndex([2000-02-01 12:00:00],
                     dtype='object', length=1, calendar='standard', freq=None)
         """
@@ -549,7 +549,7 @@ class CFTimeIndex(pd.Index):
             ) from err
 
     def to_datetimeindex(
-        self, unsafe: bool = False, time_unit: Optional[PDDatetimeUnitOptions] = None
+        self, unsafe: bool = False, time_unit: PDDatetimeUnitOptions | None = None
     ) -> pd.DatetimeIndex:
         """If possible, convert this index to a pandas.DatetimeIndex.
 
@@ -689,8 +689,6 @@ class CFTimeIndex(pd.Index):
     @property
     def calendar(self):
         """The calendar used by the datetimes in the index."""
-        from xarray.coding.times import infer_calendar_name
-
         if not self._data.size:
             return None
 
@@ -709,16 +707,22 @@ class CFTimeIndex(pd.Index):
 
     def _round_via_method(self, freq, method):
         """Round dates using a specified method."""
-        from xarray.coding.cftime_offsets import CFTIME_TICKS, to_offset
+        from xarray.coding.cftime_offsets import CFTIME_TICKS, Day, to_offset
 
         if not self._data.size:
             return CFTimeIndex(np.array(self))
 
         offset = to_offset(freq)
-        if not isinstance(offset, CFTIME_TICKS):
+        if isinstance(offset, Day):
+            # Following pandas, "In the 'round' context, Day unambiguously
+            # means 24h, not calendar-day"
+            offset_as_timedelta = timedelta(days=offset.n)
+        elif isinstance(offset, CFTIME_TICKS):
+            offset_as_timedelta = offset.as_timedelta()
+        else:
             raise ValueError(f"{offset} is a non-fixed frequency")
 
-        unit = _total_microseconds(offset.as_timedelta())
+        unit = _total_microseconds(offset_as_timedelta)
         values = self.asi8
         rounded = method(values, unit)
         return _cftimeindex_from_i8(rounded, self.date_type, self.name)
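With Day no longer a Tick, the rounding methods still accept a "D" frequency by treating it as a fixed 24 hours, as the comment above notes. A minimal sketch:

    import xarray as xr

    times = xr.date_range("2000-01-01T09:00", periods=3, freq="12h", use_cftime=True)
    print(times.round("D"))    # "D" is interpreted as exactly 24h here
    print(times.floor("24h"))  # equivalent spelling with an explicit tick frequency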
diff -pruN 2025.03.1-8/xarray/coding/common.py 2025.10.1-1/xarray/coding/common.py
--- 2025.03.1-8/xarray/coding/common.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/coding/common.py	2025-10-10 10:38:05.000000000 +0000
@@ -53,7 +53,7 @@ class _ElementwiseFunctionArray(indexing
     Values are computed upon indexing or coercion to a NumPy array.
     """
 
-    def __init__(self, array, func: Callable, dtype: np.typing.DTypeLike):
+    def __init__(self, array, func: Callable, dtype: np.typing.DTypeLike | None):
         assert not is_chunked_array(array)
         self.array = indexing.as_indexable(array)
         self.func = func
@@ -63,6 +63,10 @@ class _ElementwiseFunctionArray(indexing
     def dtype(self) -> np.dtype:
         return np.dtype(self._dtype)
 
+    def transpose(self, order):
+        # For elementwise functions, we can compose transpose and function application
+        return type(self)(self.array.transpose(order), self.func, self.dtype)
+
     def _oindex_get(self, key):
         return type(self)(self.array.oindex[key], self.func, self.dtype)
 
@@ -75,11 +79,14 @@ class _ElementwiseFunctionArray(indexing
     def get_duck_array(self):
         return self.func(self.array.get_duck_array())
 
+    async def async_get_duck_array(self):
+        return self.func(await self.array.async_get_duck_array())
+
     def __repr__(self) -> str:
         return f"{type(self).__name__}({self.array!r}, func={self.func!r}, dtype={self.dtype!r})"
 
 
-def lazy_elemwise_func(array, func: Callable, dtype: np.typing.DTypeLike):
+def lazy_elemwise_func(array, func: Callable, dtype: np.typing.DTypeLike | None):
     """Lazily apply an element-wise function to an array.
     Parameters
     ----------
diff -pruN 2025.03.1-8/xarray/coding/frequencies.py 2025.10.1-1/xarray/coding/frequencies.py
--- 2025.03.1-8/xarray/coding/frequencies.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/coding/frequencies.py	2025-10-10 10:38:05.000000000 +0000
@@ -137,7 +137,7 @@ class _CFTimeFrequencyInferer:  # (pd.ts
             return self._infer_daily_rule()
         # There is no possible intraday frequency with a non-unique delta
         # Different from pandas: we don't need to manage DST and business offsets in cftime
-        elif not len(self.deltas) == 1:
+        elif len(self.deltas) != 1:
             return None
 
         if _is_multiple(delta, _ONE_HOUR):
diff -pruN 2025.03.1-8/xarray/coding/strings.py 2025.10.1-1/xarray/coding/strings.py
--- 2025.03.1-8/xarray/coding/strings.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/coding/strings.py	2025-10-10 10:38:05.000000000 +0000
@@ -2,6 +2,7 @@
 
 from __future__ import annotations
 
+import re
 from functools import partial
 
 import numpy as np
@@ -15,7 +16,7 @@ from xarray.coding.variables import (
     unpack_for_encoding,
 )
 from xarray.core import indexing
-from xarray.core.utils import module_available
+from xarray.core.utils import emit_user_level_warning, module_available
 from xarray.core.variable import Variable
 from xarray.namedarray.parallelcompat import get_chunked_array_type
 from xarray.namedarray.pycompat import is_chunked_array
@@ -113,6 +114,35 @@ def ensure_fixed_length_bytes(var: Varia
         return var
 
 
+def validate_char_dim_name(strlen, encoding, name) -> str:
+    """Check character array dimension naming and size and return it."""
+
+    if (char_dim_name := encoding.pop("char_dim_name", None)) is not None:
+        # 1 - extract all characters up to last number sequence
+        # 2 - extract last number sequence
+        match = re.search(r"^(.*?)(\d+)(?!.*\d)", char_dim_name)
+        if match:
+            new_dim_name = match.group(1)
+            if int(match.group(2)) != strlen:
+                emit_user_level_warning(
+                    f"String dimension naming mismatch on variable {name!r}. {char_dim_name!r} provided by encoding, but data has length of '{strlen}'. Using '{new_dim_name}{strlen}' instead of {char_dim_name!r} to prevent possible naming clash.\n"
+                    "To silence this warning either remove 'char_dim_name' from encoding or provide a fitting name."
+                )
+            char_dim_name = f"{new_dim_name}{strlen}"
+        elif (
+            original_shape := encoding.get("original_shape", [-1])[-1]
+        ) != -1 and original_shape != strlen:
+            emit_user_level_warning(
+                f"String dimension length mismatch on variable {name!r}. '{original_shape}' provided by encoding, but data has length of '{strlen}'. Using '{char_dim_name}{strlen}' instead of {char_dim_name!r} to prevent possible naming clash.\n"
+                f"To silence this warning remove 'original_shape' from encoding."
+            )
+            char_dim_name = f"{char_dim_name}{strlen}"
+    else:
+        char_dim_name = f"string{strlen}"
+
+    return char_dim_name
+
+
 class CharacterArrayCoder(VariableCoder):
     """Transforms between arrays containing bytes and character arrays."""
 
@@ -122,10 +152,7 @@ class CharacterArrayCoder(VariableCoder)
         dims, data, attrs, encoding = unpack_for_encoding(variable)
         if data.dtype.kind == "S" and encoding.get("dtype") is not str:
             data = bytes_to_char(data)
-            if "char_dim_name" in encoding.keys():
-                char_dim_name = encoding.pop("char_dim_name")
-            else:
-                char_dim_name = f"string{data.shape[-1]}"
+            char_dim_name = validate_char_dim_name(data.shape[-1], encoding, name)
             dims = dims + (char_dim_name,)
         return Variable(dims, data, attrs, encoding)
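The validation above can be exercised end to end when serializing fixed-width bytes with an explicit character-dimension name; a minimal sketch in which the file name, variable name and dimension name are assumptions:

    import numpy as np
    import xarray as xr

    ds = xr.Dataset({"name": ("x", np.array([b"abc", b"de"], dtype="S3"))})
    # The trailing number does not match the actual string length (3), so the
    # coder warns and writes the character dimension as "label3" instead.
    ds["name"].encoding["char_dim_name"] = "label5"
    ds.to_netcdf("chars.nc")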
 
@@ -221,7 +248,7 @@ class StackedBytesArray(indexing.Explici
     values, when accessed, are automatically stacked along the last dimension.
 
     >>> indexer = indexing.BasicIndexer((slice(None),))
-    >>> StackedBytesArray(np.array(["a", "b", "c"], dtype="S1"))[indexer]
+    >>> np.array(StackedBytesArray(np.array(["a", "b", "c"], dtype="S1"))[indexer])
     array(b'abc', dtype='|S3')
     """
 
@@ -250,14 +277,17 @@ class StackedBytesArray(indexing.Explici
         return f"{type(self).__name__}({self.array!r})"
 
     def _vindex_get(self, key):
-        return _numpy_char_to_bytes(self.array.vindex[key])
+        return type(self)(self.array.vindex[key])
 
     def _oindex_get(self, key):
-        return _numpy_char_to_bytes(self.array.oindex[key])
+        return type(self)(self.array.oindex[key])
 
     def __getitem__(self, key):
         # require slicing the last dimension completely
         key = type(key)(indexing.expanded_indexer(key.tuple, self.array.ndim))
         if key.tuple[-1] != slice(None):
             raise IndexError("too many indices")
-        return _numpy_char_to_bytes(self.array[key])
+        return type(self)(self.array[key])
+
+    def get_duck_array(self):
+        return _numpy_char_to_bytes(self.array.get_duck_array())
diff -pruN 2025.03.1-8/xarray/coding/times.py 2025.10.1-1/xarray/coding/times.py
--- 2025.03.1-8/xarray/coding/times.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/coding/times.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import contextlib
 import re
 import warnings
 from collections.abc import Callable, Hashable
@@ -92,6 +93,12 @@ TIME_UNITS = frozenset(
 )
 
 
+_INVALID_LITERAL_TIMEDELTA64_ENCODING_KEYS = [
+    "add_offset",
+    "scale_factor",
+]
+
+
 def _is_standard_calendar(calendar: str) -> bool:
     return calendar.lower() in _STANDARD_CALENDARS
 
@@ -239,7 +246,7 @@ def build_pattern(
     ]
     pattern_list = []
     for sep, name, sub_pattern in pieces:
-        pattern_list.append((sep if sep else "") + named(name, sub_pattern))
+        pattern_list.append((sep or "") + named(name, sub_pattern))
         # TODO: allow timezone offsets?
     return "^" + trailing_optional(pattern_list) + "$"
 
@@ -423,12 +430,11 @@ def _check_date_is_after_shift(
     # if we are outside the well-defined date range
     # proleptic_gregorian and standard/gregorian are only equivalent
     # if reference date and date range is >= 1582-10-15
-    if calendar != "proleptic_gregorian":
-        if date < type(date)(1582, 10, 15):
-            raise OutOfBoundsDatetime(
-                f"Dates before 1582-10-15 cannot be decoded "
-                f"with pandas using {calendar!r} calendar: {date}"
-            )
+    if calendar != "proleptic_gregorian" and date < type(date)(1582, 10, 15):
+        raise OutOfBoundsDatetime(
+            f"Dates before 1582-10-15 cannot be decoded "
+            f"with pandas using {calendar!r} calendar: {date}"
+        )
 
 
 def _check_higher_resolution(
@@ -573,9 +579,8 @@ def decode_cf_datetime(
                     "'time_unit' or specify 'use_cftime=True'.",
                     SerializationWarning,
                 )
-            else:
-                if _is_standard_calendar(calendar):
-                    dates = cftime_to_nptime(dates, time_unit=time_unit)
+            elif _is_standard_calendar(calendar):
+                dates = cftime_to_nptime(dates, time_unit=time_unit)
     elif use_cftime:
         dates = _decode_datetime_with_cftime(flat_num_dates, units, calendar)
     else:
@@ -659,22 +664,28 @@ def decode_cf_timedelta(
     num_timedeltas = to_numpy(num_timedeltas)
     unit = _netcdf_to_numpy_timeunit(units)
 
+    # special case empty arrays
+    is_empty_array = num_timedeltas.size == 0
+
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore", "All-NaN slice encountered", RuntimeWarning)
-        _check_timedelta_range(np.nanmin(num_timedeltas), unit, time_unit)
-        _check_timedelta_range(np.nanmax(num_timedeltas), unit, time_unit)
+        if not is_empty_array:
+            _check_timedelta_range(np.nanmin(num_timedeltas), unit, time_unit)
+            _check_timedelta_range(np.nanmax(num_timedeltas), unit, time_unit)
 
     timedeltas = _numbers_to_timedelta(
         num_timedeltas, unit, "s", "timedeltas", target_unit=time_unit
     )
     pd_timedeltas = pd.to_timedelta(ravel(timedeltas))
 
-    if np.isnat(timedeltas).all():
+    if not is_empty_array and np.isnat(timedeltas).all():
         empirical_unit = time_unit
     else:
         empirical_unit = pd_timedeltas.unit
 
-    if np.timedelta64(1, time_unit) > np.timedelta64(1, empirical_unit):
+    if is_empty_array or np.timedelta64(1, time_unit) > np.timedelta64(
+        1, empirical_unit
+    ):
         time_unit = empirical_unit
 
     if time_unit not in {"s", "ms", "us", "ns"}:
@@ -917,12 +928,10 @@ def _cleanup_netcdf_time_units(units: st
     time_units = time_units.lower()
     if not time_units.endswith("s"):
         time_units = f"{time_units}s"
-    try:
+    # don't worry about reifying the units if they're out of bounds or
+    # formatted badly
+    with contextlib.suppress(OutOfBoundsDatetime, ValueError):
         units = f"{time_units} since {format_timestamp(ref_date)}"
-    except (OutOfBoundsDatetime, ValueError):
-        # don't worry about reifying the units if they're out of bounds or
-        # formatted badly
-        pass
     return units
 
 
@@ -937,41 +946,38 @@ def _encode_datetime_with_cftime(dates,
     else:
         cftime = attempt_import("cftime")
 
+    dates = np.asarray(dates)
+    original_shape = dates.shape
+
     if np.issubdtype(dates.dtype, np.datetime64):
         # numpy's broken datetime conversion only works for us precision
         dates = dates.astype("M8[us]").astype(datetime)
 
-    def wrap_dt(dt):
-        # convert to cftime proleptic gregorian in case of datetime.datetime
-        # needed because of https://github.com/Unidata/cftime/issues/354
-        if isinstance(dt, datetime) and not isinstance(dt, cftime.datetime):
-            dt = cftime.datetime(
-                dt.year,
-                dt.month,
-                dt.day,
-                dt.hour,
-                dt.minute,
-                dt.second,
-                dt.microsecond,
-                calendar="proleptic_gregorian",
-            )
-        return dt
+    dates = np.atleast_1d(dates)
 
-    def encode_datetime(d):
-        # Since netCDF files do not support storing float128 values, we ensure
-        # that float64 values are used by setting longdouble=False in num2date.
-        # This try except logic can be removed when xarray's minimum version of
-        # cftime is at least 1.6.2.
-        try:
-            return (
-                np.nan
-                if d is None
-                else cftime.date2num(wrap_dt(d), units, calendar, longdouble=False)
-            )
-        except TypeError:
-            return np.nan if d is None else cftime.date2num(wrap_dt(d), units, calendar)
+    # Find the positions of all None values
+    none_position = dates == None  # noqa: E711
+    filtered_dates = dates[~none_position]
+
+    # Since netCDF files do not support storing float128 values, we ensure
+    # that float64 values are used by setting longdouble=False in date2num.
+    # This try except logic can be removed when xarray's minimum version of
+    # cftime is at least 1.6.2.
+    try:
+        encoded_nums = cftime.date2num(
+            filtered_dates, units, calendar, longdouble=False
+        )
+    except TypeError:
+        encoded_nums = cftime.date2num(filtered_dates, units, calendar)
+
+    if filtered_dates.size == none_position.size:
+        return encoded_nums.reshape(original_shape)
 
-    return reshape(np.array([encode_datetime(d) for d in ravel(dates)]), dates.shape)
+    # Create an array filled with NaN, then insert the encoded dates
+    # at the positions that were not None
+    result = np.full(dates.shape, np.nan)
+    result[np.nonzero(~none_position)] = encoded_nums
+    return result.reshape(original_shape)
 
 
 def cast_to_int_if_safe(num) -> np.ndarray:
@@ -1053,24 +1059,19 @@ def _eagerly_encode_cf_datetime(
         calendar = infer_calendar_name(dates)
 
     raise_incompatible_units_error = False
+    raise_gregorian_proleptic_gregorian_mismatch_error = False
     try:
         if not _is_standard_calendar(calendar) or dates.dtype.kind == "O":
             # parse with cftime instead
             raise OutOfBoundsDatetime
         assert np.issubdtype(dates.dtype, "datetime64")
-        if calendar in ["standard", "gregorian"] and np.nanmin(dates).astype(
-            "=M8[us]"
-        ).astype(datetime) < datetime(1582, 10, 15):
-            # if we use standard calendar and for dates before the reform
-            # we need to use cftime instead
-            emit_user_level_warning(
-                f"Unable to encode numpy.datetime64 objects with {calendar} calendar."
-                "Using cftime.datetime objects instead, reason: dates prior "
-                "reform date (1582-10-15). To silence this warning transform "
-                "numpy.datetime64 to corresponding cftime.datetime beforehand.",
-                SerializationWarning,
-            )
-            raise OutOfBoundsDatetime
+        if (
+            calendar in ["standard", "gregorian"]
+            and dates.size > 0
+            and np.nanmin(dates).astype("=M8[us]").astype(datetime)
+            < datetime(1582, 10, 15)
+        ):
+            raise_gregorian_proleptic_gregorian_mismatch_error = True
 
         time_unit, ref_date = _unpack_time_unit_and_ref_date(units)
         # calendar equivalence only for days after the reform
@@ -1154,6 +1155,16 @@ def _eagerly_encode_cf_datetime(
             f"units {units!r}. Consider setting encoding['units'] to {new_units!r} to "
             f"serialize with an integer dtype."
         )
+    if raise_gregorian_proleptic_gregorian_mismatch_error:
+        raise ValueError(
+            f"Unable to encode np.datetime64 values with {calendar} "
+            f"calendar, because some or all values are prior to the reform "
+            f"date of 1582-10-15. To encode these times, set "
+            f"encoding['calendar'] to 'proleptic_gregorian' instead, which "
+            f"is the true calendar that np.datetime64 values use. The "
+            f"'standard' or 'gregorian' calendar is only equivalent to the "
+            f"'proleptic_gregorian' calendar after the reform date."
+        )
 
     return num, units, calendar
 
@@ -1230,6 +1241,9 @@ def _eagerly_encode_cf_timedelta(
     data_units = infer_timedelta_units(timedeltas)
     if units is None:
         units = data_units
+    # units take precedence in the case of a zero-size array
+    if timedeltas.size == 0:
+        data_units = units
 
     time_delta = _unit_timedelta_numpy(units)
     time_deltas = pd.TimedeltaIndex(ravel(timedeltas))
@@ -1394,27 +1408,83 @@ class CFDatetimeCoder(VariableCoder):
             return variable
 
 
+def has_timedelta64_encoding_dtype(attrs_or_encoding: dict) -> bool:
+    dtype = attrs_or_encoding.get("dtype")
+    return isinstance(dtype, str) and dtype.startswith("timedelta64")
+
+
+def resolve_time_unit_from_attrs_dtype(
+    attrs_dtype: str, name: T_Name
+) -> PDDatetimeUnitOptions:
+    dtype = np.dtype(attrs_dtype)
+    resolution, _ = np.datetime_data(dtype)
+    resolution = cast(NPDatetimeUnitOptions, resolution)
+    time_unit: PDDatetimeUnitOptions
+    if np.timedelta64(1, resolution) > np.timedelta64(1, "s"):
+        time_unit = "s"
+        message = (
+            f"Following pandas, xarray only supports decoding to timedelta64 "
+            f"values with a resolution of 's', 'ms', 'us', or 'ns'. Encoded "
+            f"values for variable {name!r} have a resolution of "
+            f"{resolution!r}. Attempting to decode to a resolution of 's'. "
+            f"Note, depending on the encoded values, this may lead to an "
+            f"OverflowError. Additionally, data will not be identically round "
+            f"tripped; xarray will choose an encoding dtype of "
+            f"'timedelta64[s]' when re-encoding."
+        )
+        emit_user_level_warning(message)
+    elif np.timedelta64(1, resolution) < np.timedelta64(1, "ns"):
+        time_unit = "ns"
+        message = (
+            f"Following pandas, xarray only supports decoding to timedelta64 "
+            f"values with a resolution of 's', 'ms', 'us', or 'ns'. Encoded "
+            f"values for variable {name!r} have a resolution of "
+            f"{resolution!r}. Attempting to decode to a resolution of 'ns'. "
+            f"Note, depending on the encoded values, this may lead to loss of "
+            f"precision. Additionally, data will not be identically round "
+            f"tripped; xarray will choose an encoding dtype of "
+            f"'timedelta64[ns]' when re-encoding."
+        )
+        emit_user_level_warning(message)
+    else:
+        time_unit = cast(PDDatetimeUnitOptions, resolution)
+    return time_unit
+
+
 class CFTimedeltaCoder(VariableCoder):
     """Coder for CF Timedelta coding.
 
     Parameters
     ----------
     time_unit : PDDatetimeUnitOptions
-          Target resolution when decoding timedeltas. Defaults to "ns".
+        Target resolution when decoding timedeltas via units. Defaults to "ns".
+        When decoding via dtype, the resolution is specified in the dtype
+        attribute, so this parameter is ignored.
+    decode_via_units : bool
+        Whether to decode timedeltas based on the presence of a timedelta-like
+        units attribute, e.g. "seconds". Defaults to True, but in the future
+        will default to False.
+    decode_via_dtype : bool
+        Whether to decode timedeltas based on the presence of a np.timedelta64
+        dtype attribute, e.g. "timedelta64[s]". Defaults to True.
     """
 
     def __init__(
         self,
-        time_unit: PDDatetimeUnitOptions = "ns",
+        time_unit: PDDatetimeUnitOptions | None = None,
+        decode_via_units: bool = True,
+        decode_via_dtype: bool = True,
     ) -> None:
         self.time_unit = time_unit
+        self.decode_via_units = decode_via_units
+        self.decode_via_dtype = decode_via_dtype
         self._emit_decode_timedelta_future_warning = False
 
     def encode(self, variable: Variable, name: T_Name = None) -> Variable:
         if np.issubdtype(variable.data.dtype, np.timedelta64):
             dims, data, attrs, encoding = unpack_for_encoding(variable)
-
             dtype = encoding.get("dtype", None)
+            units = encoding.pop("units", None)
 
             # in the case of packed data we need to encode into
             # float first, the correct dtype will be established
@@ -1422,34 +1492,66 @@ class CFTimedeltaCoder(VariableCoder):
             if "add_offset" in encoding or "scale_factor" in encoding:
                 dtype = data.dtype if data.dtype.kind == "f" else "float64"
 
-            data, units = encode_cf_timedelta(data, encoding.pop("units", None), dtype)
+            resolution, _ = np.datetime_data(variable.dtype)
+            attrs_dtype = f"timedelta64[{resolution}]"
+            safe_setitem(attrs, "dtype", attrs_dtype, name=name)
 
+            data, units = encode_cf_timedelta(data, units, dtype)
             safe_setitem(attrs, "units", units, name=name)
-
             return Variable(dims, data, attrs, encoding, fastpath=True)
         else:
             return variable
 
     def decode(self, variable: Variable, name: T_Name = None) -> Variable:
         units = variable.attrs.get("units", None)
-        if isinstance(units, str) and units in TIME_UNITS:
-            if self._emit_decode_timedelta_future_warning:
-                emit_user_level_warning(
-                    "In a future version of xarray decode_timedelta will "
-                    "default to False rather than None. To silence this "
-                    "warning, set decode_timedelta to True, False, or a "
-                    "'CFTimedeltaCoder' instance.",
-                    FutureWarning,
-                )
+        has_timedelta_units = isinstance(units, str) and units in TIME_UNITS
+        has_timedelta_dtype = has_timedelta64_encoding_dtype(variable.attrs)
+        is_dtype_decodable = has_timedelta_units and has_timedelta_dtype
+        is_units_decodable = has_timedelta_units
+        if (is_dtype_decodable and self.decode_via_dtype) or (
+            is_units_decodable and self.decode_via_units
+        ):
             dims, data, attrs, encoding = unpack_for_decoding(variable)
-
             units = pop_to(attrs, encoding, "units")
-            dtype = np.dtype(f"timedelta64[{self.time_unit}]")
-            transform = partial(
-                decode_cf_timedelta, units=units, time_unit=self.time_unit
-            )
-            data = lazy_elemwise_func(data, transform, dtype=dtype)
+            if is_dtype_decodable:
+                attrs_dtype = attrs.pop("dtype")
+                if self.time_unit is None:
+                    time_unit = resolve_time_unit_from_attrs_dtype(attrs_dtype, name)
+                else:
+                    time_unit = self.time_unit
+            else:
+                if self._emit_decode_timedelta_future_warning:
+                    var_string = f"the variable {name!r}" if name else ""
+                    emit_user_level_warning(
+                        "In a future version, xarray will not decode "
+                        f"{var_string} into a timedelta64 dtype based on the "
+                        "presence of a timedelta-like 'units' attribute by "
+                        "default. Instead it will rely on the presence of a "
+                        "timedelta64 'dtype' attribute, which is now xarray's "
+                        "default way of encoding timedelta64 values.\n"
+                        "To continue decoding into a timedelta64 dtype, either "
+                        "set `decode_timedelta=True` when opening this "
+                        "dataset, or add the attribute "
+                        "`dtype='timedelta64[ns]'` to this variable on disk.\n"
+                        "To opt-in to future behavior, set "
+                        "`decode_timedelta=False`.",
+                        FutureWarning,
+                    )
+                if self.time_unit is None:
+                    time_unit = "ns"
+                else:
+                    time_unit = self.time_unit
+
+                # Handle the edge case where decode_via_dtype=False and
+                # decode_via_units=True, and timedeltas were encoded with a
+                # dtype attribute. We need to remove the dtype attribute
+                # to prevent an error during round tripping.
+                if has_timedelta_dtype:
+                    attrs.pop("dtype")
 
+            dtype = np.dtype(f"timedelta64[{time_unit}]")
+            transform = partial(decode_cf_timedelta, units=units, time_unit=time_unit)
+            data = lazy_elemwise_func(data, transform, dtype=dtype)
             return Variable(dims, data, attrs, encoding, fastpath=True)
         else:
             return variable
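A minimal sketch of driving these options through decode_timedelta, importing the coder from the module shown in this diff (the file name is an assumption):

    import xarray as xr
    from xarray.coding.times import CFTimedeltaCoder

    # Decode timedeltas only via the new "dtype" attribute, at the resolution
    # that attribute specifies, and ignore bare timedelta-like "units".
    coder = CFTimedeltaCoder(decode_via_units=False, decode_via_dtype=True)
    ds = xr.open_dataset("example.nc", decode_timedelta=coder)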
diff -pruN 2025.03.1-8/xarray/coding/variables.py 2025.10.1-1/xarray/coding/variables.py
--- 2025.03.1-8/xarray/coding/variables.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/coding/variables.py	2025-10-10 10:38:05.000000000 +0000
@@ -21,6 +21,7 @@ from xarray.coding.common import (
 )
 from xarray.coding.times import CFDatetimeCoder, CFTimedeltaCoder
 from xarray.core import dtypes, duck_array_ops, indexing
+from xarray.core.types import Self
 from xarray.core.variable import Variable
 
 if TYPE_CHECKING:
@@ -58,13 +59,19 @@ class NativeEndiannessArray(indexing.Exp
         return np.dtype(self.array.dtype.kind + str(self.array.dtype.itemsize))
 
     def _oindex_get(self, key):
-        return np.asarray(self.array.oindex[key], dtype=self.dtype)
+        return type(self)(self.array.oindex[key])
 
     def _vindex_get(self, key):
-        return np.asarray(self.array.vindex[key], dtype=self.dtype)
+        return type(self)(self.array.vindex[key])
 
-    def __getitem__(self, key) -> np.ndarray:
-        return np.asarray(self.array[key], dtype=self.dtype)
+    def __getitem__(self, key) -> Self:
+        return type(self)(self.array[key])
+
+    def get_duck_array(self):
+        return duck_array_ops.astype(self.array.get_duck_array(), dtype=self.dtype)
+
+    def transpose(self, order):
+        return type(self)(self.array.transpose(order))
 
 
 class BoolTypeArray(indexing.ExplicitlyIndexedNDArrayMixin):
@@ -96,20 +103,26 @@ class BoolTypeArray(indexing.ExplicitlyI
         return np.dtype("bool")
 
     def _oindex_get(self, key):
-        return np.asarray(self.array.oindex[key], dtype=self.dtype)
+        return type(self)(self.array.oindex[key])
 
     def _vindex_get(self, key):
-        return np.asarray(self.array.vindex[key], dtype=self.dtype)
+        return type(self)(self.array.vindex[key])
+
+    def __getitem__(self, key) -> Self:
+        return type(self)(self.array[key])
 
-    def __getitem__(self, key) -> np.ndarray:
-        return np.asarray(self.array[key], dtype=self.dtype)
+    def get_duck_array(self):
+        return duck_array_ops.astype(self.array.get_duck_array(), dtype=self.dtype)
+
+    def transpose(self, order):
+        return type(self)(self.array.transpose(order))
 
 
 def _apply_mask(
     data: np.ndarray,
     encoded_fill_values: list,
     decoded_fill_value: Any,
-    dtype: np.typing.DTypeLike,
+    dtype: np.typing.DTypeLike | None,
 ) -> np.ndarray:
     """Mask all matching values in a NumPy arrays."""
     data = np.asarray(data, dtype=dtype)
@@ -157,10 +170,8 @@ def _check_fill_values(attrs, name, dtyp
     Issue SerializationWarning if appropriate.
     """
     raw_fill_dict = {}
-    [
+    for attr in ("missing_value", "_FillValue"):
         pop_to(attrs, raw_fill_dict, attr, name=name)
-        for attr in ("missing_value", "_FillValue")
-    ]
     encoded_fill_values = set()
     for k in list(raw_fill_dict):
         v = raw_fill_dict[k]
@@ -291,15 +302,12 @@ class CFMaskCoder(VariableCoder):
         if fv_exists:
             # Ensure _FillValue is cast to same dtype as data's
             # but not for packed data
-            encoding["_FillValue"] = (
-                _encode_unsigned_fill_value(name, fv, dtype)
-                if has_unsigned
-                else (
-                    dtype.type(fv)
-                    if "add_offset" not in encoding and "scale_factor" not in encoding
-                    else fv
-                )
-            )
+            if has_unsigned:
+                encoding["_FillValue"] = _encode_unsigned_fill_value(name, fv, dtype)
+            elif "add_offset" not in encoding and "scale_factor" not in encoding:
+                encoding["_FillValue"] = dtype.type(fv)
+            else:
+                encoding["_FillValue"] = fv
             fill_value = pop_to(encoding, attrs, "_FillValue", name=name)
 
         if mv_exists:
@@ -345,7 +353,17 @@ class CFMaskCoder(VariableCoder):
         if fill_value is not None and has_unsigned:
             pop_to(encoding, attrs, "_Unsigned")
             # XXX: Is this actually needed? Doesn't the backend handle this?
-            data = duck_array_ops.astype(duck_array_ops.around(data), dtype)
+            # two-stage casting to prevent undefined cast from float to unsigned int
+            # first float -> int with corresponding itemsize
+            # second int -> int/uint to final itemsize
+            signed_dtype = np.dtype(f"i{data.itemsize}")
+            data = duck_array_ops.astype(
+                duck_array_ops.astype(
+                    duck_array_ops.around(data), signed_dtype, copy=False
+                ),
+                dtype,
+                copy=False,
+            )
             attrs["_FillValue"] = fill_value
 
         return Variable(dims, data, attrs, encoding, fastpath=True)
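The reason for the two-stage cast above can be demonstrated with plain numpy: casting a negative float directly to an unsigned integer is undefined behaviour, while going through a signed integer of the same itemsize first is well defined. A minimal sketch:

    import numpy as np

    packed = np.array([-1.0, 200.0])  # float64 values destined for unsigned storage

    direct = packed.astype(np.uint8)  # undefined for -1.0 (platform dependent, may warn)
    via_signed = packed.astype(np.int64).astype(np.uint8)  # -1 -> 255, well-defined int -> uint wrap

    print(direct, via_signed)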
@@ -359,11 +377,9 @@ class CFMaskCoder(VariableCoder):
 
         dims, data, attrs, encoding = unpack_for_decoding(variable)
 
-        # Even if _Unsigned is use, retain on-disk _FillValue
-        [
+        # Even if _Unsigned is used, retain on-disk _FillValue
+        for attr, value in raw_fill_dict.items():
             safe_setitem(encoding, attr, value, name=name)
-            for attr, value in raw_fill_dict.items()
-        ]
 
         if "_Unsigned" in attrs:
             unsigned = pop_to(attrs, encoding, "_Unsigned")
@@ -410,7 +426,9 @@ class CFMaskCoder(VariableCoder):
         return Variable(dims, data, attrs, encoding, fastpath=True)
 
 
-def _scale_offset_decoding(data, scale_factor, add_offset, dtype: np.typing.DTypeLike):
+def _scale_offset_decoding(
+    data, scale_factor, add_offset, dtype: np.typing.DTypeLike | None
+):
     data = data.astype(dtype=dtype, copy=True)
     if scale_factor is not None:
         data *= scale_factor
@@ -510,9 +528,9 @@ class CFScaleOffsetCoder(VariableCoder):
 
             scale_factor = pop_to(attrs, encoding, "scale_factor", name=name)
             add_offset = pop_to(attrs, encoding, "add_offset", name=name)
-            if np.ndim(scale_factor) > 0:
+            if duck_array_ops.ndim(scale_factor) > 0:
                 scale_factor = np.asarray(scale_factor).item()
-            if np.ndim(add_offset) > 0:
+            if duck_array_ops.ndim(add_offset) > 0:
                 add_offset = np.asarray(add_offset).item()
             # if we have a _FillValue/masked_value in encoding we already have the wanted
             # floating point dtype here (via CFMaskCoder), so no check is necessary
diff -pruN 2025.03.1-8/xarray/compat/toolzcompat.py 2025.10.1-1/xarray/compat/toolzcompat.py
--- 2025.03.1-8/xarray/compat/toolzcompat.py	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/xarray/compat/toolzcompat.py	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,56 @@
+# This file contains functions copied from the toolz library in accordance
+# with its license. The original copyright notice is duplicated below.
+
+# Copyright (c) 2013 Matthew Rocklin
+
+# All rights reserved.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+
+#   a. Redistributions of source code must retain the above copyright notice,
+#      this list of conditions and the following disclaimer.
+#   b. Redistributions in binary form must reproduce the above copyright
+#      notice, this list of conditions and the following disclaimer in the
+#      documentation and/or other materials provided with the distribution.
+#   c. Neither the name of toolz nor the names of its contributors
+#      may be used to endorse or promote products derived from this software
+#      without specific prior written permission.
+
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE.
+
+
+def sliding_window(n, seq):
+    """A sequence of overlapping subsequences
+
+    >>> list(sliding_window(2, [1, 2, 3, 4]))
+    [(1, 2), (2, 3), (3, 4)]
+
+    This function creates a sliding window suitable for transformations like
+    sliding means / smoothing
+
+    >>> mean = lambda seq: float(sum(seq)) / len(seq)
+    >>> list(map(mean, sliding_window(2, [1, 2, 3, 4])))
+    [1.5, 2.5, 3.5]
+    """
+    import collections
+    import itertools
+
+    return zip(
+        *(
+            collections.deque(itertools.islice(it, i), 0) or it
+            for i, it in enumerate(itertools.tee(seq, n))
+        ),
+        strict=False,
+    )
diff -pruN 2025.03.1-8/xarray/computation/apply_ufunc.py 2025.10.1-1/xarray/computation/apply_ufunc.py
--- 2025.03.1-8/xarray/computation/apply_ufunc.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/computation/apply_ufunc.py	2025-10-10 10:38:05.000000000 +0000
@@ -16,24 +16,19 @@ from collections.abc import (
     Iterator,
     Mapping,
     Sequence,
-    Set,
 )
-from typing import TYPE_CHECKING, Any, Literal, TypeVar, Union
+from collections.abc import (
+    Set as AbstractSet,
+)
+from typing import TYPE_CHECKING, Any, Literal
 
 import numpy as np
 
-_T = TypeVar("_T", bound=Union["Dataset", "DataArray"])
-_U = TypeVar("_U", bound=Union["Dataset", "DataArray"])
-_V = TypeVar("_V", bound=Union["Dataset", "DataArray"])
-
 from xarray.core import duck_array_ops, utils
 from xarray.core.formatting import limit_lines
 from xarray.core.indexes import Index, filter_indexes_from_coords
 from xarray.core.options import _get_keep_attrs
-from xarray.core.utils import (
-    is_dict_like,
-    result_name,
-)
+from xarray.core.utils import is_dict_like, result_name
 from xarray.core.variable import Variable
 from xarray.namedarray.parallelcompat import get_chunked_array_type
 from xarray.namedarray.pycompat import is_chunked_array
@@ -146,8 +141,13 @@ class _UFuncSignature:
         return f"{type(self).__name__}({list(self.input_core_dims)!r}, {list(self.output_core_dims)!r})"
 
     def __str__(self):
-        lhs = ",".join("({})".format(",".join(dims)) for dims in self.input_core_dims)
-        rhs = ",".join("({})".format(",".join(dims)) for dims in self.output_core_dims)
+        comma_separated = ",".join
+        lhs = comma_separated(
+            f"({comma_separated(dims)})" for dims in self.input_core_dims
+        )
+        rhs = comma_separated(
+            f"({comma_separated(dims)})" for dims in self.output_core_dims
+        )
         return f"{lhs}->{rhs}"
 
     def to_gufunc_string(self, exclude_dims=frozenset()):
@@ -203,7 +203,7 @@ def _get_coords_list(args: Iterable[Any]
 def build_output_coords_and_indexes(
     args: Iterable[Any],
     signature: _UFuncSignature,
-    exclude_dims: Set = frozenset(),
+    exclude_dims: AbstractSet = frozenset(),
     combine_attrs: CombineAttrsOptions = "override",
 ) -> tuple[list[dict[Any, Variable]], list[dict[Any, Index]]]:
     """Build output coordinates and indexes for an operation.
@@ -448,17 +448,16 @@ def apply_dict_of_variables_vfunc(
         core_dim_present = _check_core_dims(signature, variable_args, name)
         if core_dim_present is True:
             result_vars[name] = func(*variable_args)
+        elif on_missing_core_dim == "raise":
+            raise ValueError(core_dim_present)
+        elif on_missing_core_dim == "copy":
+            result_vars[name] = variable_args[0]
+        elif on_missing_core_dim == "drop":
+            pass
         else:
-            if on_missing_core_dim == "raise":
-                raise ValueError(core_dim_present)
-            elif on_missing_core_dim == "copy":
-                result_vars[name] = variable_args[0]
-            elif on_missing_core_dim == "drop":
-                pass
-            else:
-                raise ValueError(
-                    f"Invalid value for `on_missing_core_dim`: {on_missing_core_dim!r}"
-                )
+            raise ValueError(
+                f"Invalid value for `on_missing_core_dim`: {on_missing_core_dim!r}"
+            )
 
     if signature.num_outputs > 1:
         return _unpack_dict_tuples(result_vars, signature.num_outputs)
@@ -535,8 +534,10 @@ def apply_dataset_vfunc(
     out: Dataset | tuple[Dataset, ...]
     if signature.num_outputs > 1:
         out = tuple(
-            _fast_dataset(*args)
-            for args in zip(result_vars, list_of_coords, list_of_indexes, strict=True)
+            itertools.starmap(
+                _fast_dataset,
+                zip(result_vars, list_of_coords, list_of_indexes, strict=True),
+            )
         )
     else:
         (coord_vars,) = list_of_coords
@@ -573,7 +574,6 @@ def apply_groupby_func(func, *args):
     DataArray, Variable and/or ndarray objects.
     """
     from xarray.core.groupby import GroupBy, peek_at
-    from xarray.core.variable import Variable
 
     groupbys = [arg for arg in args if isinstance(arg, GroupBy)]
     assert groupbys, "must have at least one groupby to iterate over"
@@ -606,9 +606,7 @@ def apply_groupby_func(func, *args):
             iterator = itertools.repeat(arg)
         iterators.append(iterator)
 
-    applied: Iterator = (
-        func(*zipped_args) for zipped_args in zip(*iterators, strict=False)
-    )
+    applied: Iterator = itertools.starmap(func, zip(*iterators, strict=False))
     applied_example, applied = peek_at(applied)
     combine = first_groupby._combine  # type: ignore[attr-defined]
     if isinstance(applied_example, tuple):
@@ -619,7 +617,7 @@ def apply_groupby_func(func, *args):
 
 
 def unified_dim_sizes(
-    variables: Iterable[Variable], exclude_dims: Set = frozenset()
+    variables: Iterable[Variable], exclude_dims: AbstractSet = frozenset()
 ) -> dict[Hashable, int]:
     dim_sizes: dict[Hashable, int] = {}
 
@@ -717,7 +715,7 @@ def apply_variable_ufunc(
 ) -> Variable | tuple[Variable, ...]:
     """Apply a ndarray level function over Variable and/or ndarray objects."""
     from xarray.core.formatting import short_array_repr
-    from xarray.core.variable import Variable, as_compatible_data
+    from xarray.core.variable import as_compatible_data
 
     dim_sizes = unified_dim_sizes(
         (a for a in args if hasattr(a, "dims")), exclude_dims=exclude_dims
@@ -812,11 +810,10 @@ def apply_variable_ufunc(
             raise ValueError(
                 f"unknown setting for chunked array handling in apply_ufunc: {dask}"
             )
-    else:
-        if vectorize:
-            func = _vectorize(
-                func, signature, output_dtypes=output_dtypes, exclude_dims=exclude_dims
-            )
+    elif vectorize:
+        func = _vectorize(
+            func, signature, output_dtypes=output_dtypes, exclude_dims=exclude_dims
+        )
 
     result_data = func(*input_data)
 
@@ -899,7 +896,7 @@ def apply_ufunc(
     *args: Any,
     input_core_dims: Sequence[Sequence] | None = None,
     output_core_dims: Sequence[Sequence] | None = ((),),
-    exclude_dims: Set = frozenset(),
+    exclude_dims: AbstractSet = frozenset(),
     vectorize: bool = False,
     join: JoinOptions = "exact",
     dataset_join: str = "exact",
@@ -1212,6 +1209,8 @@ def apply_ufunc(
             dask_gufunc_kwargs.setdefault("output_sizes", output_sizes)
 
     if kwargs:
+        if "where" in kwargs and isinstance(kwargs["where"], DataArray):
+            kwargs["where"] = kwargs["where"].data  # type:ignore[index]
         func = functools.partial(func, **kwargs)
 
     if keep_attrs is None:
diff -pruN 2025.03.1-8/xarray/computation/computation.py 2025.10.1-1/xarray/computation/computation.py
--- 2025.03.1-8/xarray/computation/computation.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/computation/computation.py	2025-10-10 10:38:05.000000000 +0000
@@ -144,9 +144,8 @@ def cov(
             "Only xr.DataArray is supported."
             f"Given {[type(arr) for arr in [da_a, da_b]]}."
         )
-    if weights is not None:
-        if not isinstance(weights, DataArray):
-            raise TypeError(f"Only xr.DataArray is supported. Given {type(weights)}.")
+    if weights is not None and not isinstance(weights, DataArray):
+        raise TypeError(f"Only xr.DataArray is supported. Given {type(weights)}.")
     return _cov_corr(da_a, da_b, weights=weights, dim=dim, ddof=ddof, method="cov")
 
 
@@ -248,9 +247,8 @@ def corr(
             "Only xr.DataArray is supported."
             f"Given {[type(arr) for arr in [da_a, da_b]]}."
         )
-    if weights is not None:
-        if not isinstance(weights, DataArray):
-            raise TypeError(f"Only xr.DataArray is supported. Given {type(weights)}.")
+    if weights is not None and not isinstance(weights, DataArray):
+        raise TypeError(f"Only xr.DataArray is supported. Given {type(weights)}.")
     return _cov_corr(da_a, da_b, weights=weights, dim=dim, method="corr")
 
 
@@ -260,7 +258,7 @@ def _cov_corr(
     weights: T_DataArray | None = None,
     dim: Dims = None,
     ddof: int = 0,
-    method: Literal["cov", "corr", None] = None,
+    method: Literal["cov", "corr"] | None = None,
 ) -> T_DataArray:
     """
     Internal method for xr.cov() and xr.corr() so only have to
@@ -576,7 +574,6 @@ def dot(
     array(235)
     """
     from xarray.core.dataarray import DataArray
-    from xarray.core.variable import Variable
 
     if any(not isinstance(arr, Variable | DataArray) for arr in arrays):
         raise TypeError(
@@ -933,7 +930,7 @@ def _calc_idxminmax(
         array = array.where(~allna, 0)
 
     # This will run argmin or argmax.
-    indx = func(array, dim=dim, axis=None, keep_attrs=keep_attrs, skipna=skipna)
+    index = func(array, dim=dim, axis=None, keep_attrs=keep_attrs, skipna=skipna)
 
     # Handle chunked arrays (e.g. dask).
     coord = array[dim]._variable.to_base_variable()
@@ -946,13 +943,13 @@ def _calc_idxminmax(
     else:
         coord = coord.copy(data=to_like_array(array[dim].data, array.data))
 
-    res = indx._replace(coord[(indx.variable,)]).rename(dim)
+    res = index._replace(coord[(index.variable,)]).rename(dim)
 
     if skipna or (skipna is None and array.dtype.kind in na_dtypes):
         # Put the NaN values back in after removing them
         res = res.where(~allna, fill_value)
 
     # Copy attributes from argmin/argmax, if any
-    res.attrs = indx.attrs
+    res.attrs = index.attrs
 
     return res
diff -pruN 2025.03.1-8/xarray/computation/fit.py 2025.10.1-1/xarray/computation/fit.py
--- 2025.03.1-8/xarray/computation/fit.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/computation/fit.py	2025-10-10 10:38:05.000000000 +0000
@@ -80,8 +80,8 @@ def _initialize_curvefit_params(params,
         )
         return p0
 
-    param_defaults = {p: 1 for p in params}
-    bounds_defaults = {p: (-np.inf, np.inf) for p in params}
+    param_defaults = dict.fromkeys(params, 1)
+    bounds_defaults = dict.fromkeys(params, (-np.inf, np.inf))
     for p in params:
         if p in func_args and func_args[p].default is not func_args[p].empty:
             param_defaults[p] = func_args[p].default
@@ -103,7 +103,7 @@ def polyfit(
     dim: Hashable,
     deg: int,
     skipna: bool | None = None,
-    rcond: float | None = None,
+    rcond: np.floating[Any] | float | None = None,
     w: Hashable | Any = None,
     full: bool = False,
     cov: bool | Literal["unscaled"] = False,
@@ -278,6 +278,7 @@ def polyfit(
                 dims=other_dims,
             )
 
+        fac: Variable | int
         if cov:
             Vbase = np.linalg.inv(np.dot(lhs.T, lhs))
             Vbase /= np.outer(scale, scale)
@@ -473,7 +474,7 @@ def curvefit(
             mask = np.all([np.any(~np.isnan(x), axis=0), ~np.isnan(y)], axis=0)
             x = x[:, mask]
             y = y[mask]
-            if not len(y):
+            if y.size == 0:
                 popt = np.full([n_params], np.nan)
                 pcov = np.full([n_params, n_params], np.nan)
                 return popt, pcov
diff -pruN 2025.03.1-8/xarray/computation/nanops.py 2025.10.1-1/xarray/computation/nanops.py
--- 2025.03.1-8/xarray/computation/nanops.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/computation/nanops.py	2025-10-10 10:38:05.000000000 +0000
@@ -105,14 +105,12 @@ def nansum(a, axis=None, dtype=None, out
 
 def _nanmean_ddof_object(ddof, value, axis=None, dtype=None, **kwargs):
     """In house nanmean. ddof argument will be used in _nanvar method"""
-    from xarray.core.duck_array_ops import count, fillna, where_method
-
     valid_count = count(value, axis=axis)
     value = fillna(value, 0)
     # As dtype inference is impossible for object dtype, we assume float
     # https://github.com/dask/dask/issues/3162
     if dtype is None and value.dtype.kind == "O":
-        dtype = value.dtype if value.dtype.kind in ["cf"] else float
+        dtype = float
 
     data = np.sum(value, axis=axis, dtype=dtype, **kwargs)
     data = data / (valid_count - ddof)
diff -pruN 2025.03.1-8/xarray/computation/ops.py 2025.10.1-1/xarray/computation/ops.py
--- 2025.03.1-8/xarray/computation/ops.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/computation/ops.py	2025-10-10 10:38:05.000000000 +0000
@@ -8,12 +8,15 @@ functions.
 from __future__ import annotations
 
 import operator
-from typing import Literal
+from typing import TYPE_CHECKING, Literal
 
 import numpy as np
 
 from xarray.core import dtypes, duck_array_ops
 
+if TYPE_CHECKING:
+    pass
+
 try:
     import bottleneck as bn
 
@@ -158,8 +161,8 @@ def fillna(data, other, join="left", dat
     )
 
 
-# Unsure why we get a mypy error here
-def where_method(self, cond, other=dtypes.NA):  # type: ignore[has-type]
+# TODO: type this properly
+def where_method(self, cond, other=dtypes.NA):  # type: ignore[unused-ignore,has-type]
     """Return elements from `self` or `other` depending on `cond`.
 
     Parameters
@@ -283,7 +286,7 @@ def inplace_to_noninplace_op(f):
 # _typed_ops.py uses the following wrapped functions as a kind of unary operator
 argsort = _method_wrapper("argsort")
 conj = _method_wrapper("conj")
-conjugate = _method_wrapper("conjugate")
+conjugate = _method_wrapper("conj")
 round_ = _func_slash_method_wrapper(duck_array_ops.around, name="round")
 
 
diff -pruN 2025.03.1-8/xarray/computation/rolling.py 2025.10.1-1/xarray/computation/rolling.py
--- 2025.03.1-8/xarray/computation/rolling.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/computation/rolling.py	2025-10-10 10:38:05.000000000 +0000
@@ -131,13 +131,11 @@ class Rolling(Generic[T_Xarray]):
     def __repr__(self) -> str:
         """provide a nice str repr of our rolling object"""
 
-        attrs = [
-            "{k}->{v}{c}".format(k=k, v=w, c="(center)" if c else "")
+        attrs = ",".join(
+            f"{k}->{w}{'(center)' if c else ''}"
             for k, w, c in zip(self.dim, self.window, self.center, strict=True)
-        ]
-        return "{klass} [{attrs}]".format(
-            klass=self.__class__.__name__, attrs=",".join(attrs)
         )
+        return f"{self.__class__.__name__} [{attrs}]"
 
     def __len__(self) -> int:
         return math.prod(self.obj.sizes[d] for d in self.dim)
@@ -195,11 +193,15 @@ class Rolling(Generic[T_Xarray]):
         return method
 
     def _mean(self, keep_attrs, **kwargs):
-        result = self.sum(keep_attrs=False, **kwargs) / duck_array_ops.astype(
-            self.count(keep_attrs=False), dtype=self.obj.dtype, copy=False
+        result = self.sum(keep_attrs=False, **kwargs)
+        # use dtype of result for casting of count
+        # this allows for GH #7062 and GH #8864, fixes GH #10340
+        result /= duck_array_ops.astype(
+            self.count(keep_attrs=False), dtype=result.dtype, copy=False
         )
         if keep_attrs:
             result.attrs = self.obj.attrs
+
         return result
 
     _mean.__doc__ = _ROLLING_REDUCE_DOCSTRING_TEMPLATE.format(name="mean")
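For reference, a small sketch of the behaviour the ``_mean`` hunk changes: the running count is now cast to the dtype of the rolling sum rather than to the dtype of the original object (example values invented):

import numpy as np
import xarray as xr

da = xr.DataArray(np.arange(5, dtype="float32"), dims="x")
out = da.rolling(x=3).mean()  # count is cast to out.dtype, not da.dtype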
@@ -1087,7 +1089,7 @@ class Coarsen(CoarsenArithmetic, Generic
         if utils.is_dict_like(coord_func):
             coord_func_map = coord_func
         else:
-            coord_func_map = {d: coord_func for d in self.obj.dims}
+            coord_func_map = dict.fromkeys(self.obj.dims, coord_func)
         for c in self.obj.coords:
             if c not in coord_func_map:
                 coord_func_map[c] = duck_array_ops.mean  # type: ignore[index]
@@ -1102,14 +1104,12 @@ class Coarsen(CoarsenArithmetic, Generic
     def __repr__(self) -> str:
         """provide a nice str repr of our coarsen object"""
 
-        attrs = [
+        attrs = ",".join(
             f"{k}->{getattr(self, k)}"
             for k in self._attributes
             if getattr(self, k, None) is not None
-        ]
-        return "{klass} [{attrs}]".format(
-            klass=self.__class__.__name__, attrs=",".join(attrs)
         )
+        return f"{self.__class__.__name__} [{attrs}]"
 
     def construct(
         self,
@@ -1249,18 +1249,17 @@ class DataArrayCoarsen(Coarsen["DataArra
             for c, v in self.obj.coords.items():
                 if c == self.obj.name:
                     coords[c] = reduced
+                elif any(d in self.windows for d in v.dims):
+                    coords[c] = v.variable.coarsen(
+                        self.windows,
+                        self.coord_func[c],
+                        self.boundary,
+                        self.side,
+                        keep_attrs,
+                        **kwargs,
+                    )
                 else:
-                    if any(d in self.windows for d in v.dims):
-                        coords[c] = v.variable.coarsen(
-                            self.windows,
-                            self.coord_func[c],
-                            self.boundary,
-                            self.side,
-                            keep_attrs,
-                            **kwargs,
-                        )
-                    else:
-                        coords[c] = v
+                    coords[c] = v
             return DataArray(
                 reduced, dims=self.obj.dims, coords=coords, name=self.obj.name
             )
diff -pruN 2025.03.1-8/xarray/conventions.py 2025.10.1-1/xarray/conventions.py
--- 2025.03.1-8/xarray/conventions.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/conventions.py	2025-10-10 10:38:05.000000000 +0000
@@ -4,7 +4,7 @@ import itertools
 import warnings
 from collections import defaultdict
 from collections.abc import Hashable, Iterable, Mapping, MutableMapping
-from typing import TYPE_CHECKING, Any, Literal, TypeVar, Union
+from typing import TYPE_CHECKING, Any, Literal, TypeVar, Union, cast
 
 import numpy as np
 
@@ -18,7 +18,7 @@ from xarray.core.common import (
 )
 from xarray.core.utils import emit_user_level_warning
 from xarray.core.variable import IndexVariable, Variable
-from xarray.namedarray.utils import is_duck_dask_array
+from xarray.namedarray.utils import is_duck_array
 
 CF_RELATED_DATA = (
     "bounds",
@@ -178,7 +178,7 @@ def decode_cf_variable(
         if isinstance(decode_times, CFDatetimeCoder):
             decode_timedelta = CFTimedeltaCoder(time_unit=decode_times.time_unit)
         else:
-            decode_timedelta = True if decode_times else False
+            decode_timedelta = bool(decode_times)
 
     if concat_characters:
         if stack_char_dim:
@@ -204,8 +204,10 @@ def decode_cf_variable(
             var = coder.decode(var, name=name)
 
     if decode_timedelta:
-        if not isinstance(decode_timedelta, CFTimedeltaCoder):
-            decode_timedelta = CFTimedeltaCoder()
+        if isinstance(decode_timedelta, bool):
+            decode_timedelta = CFTimedeltaCoder(
+                decode_via_units=decode_timedelta, decode_via_dtype=decode_timedelta
+            )
         decode_timedelta._emit_decode_timedelta_future_warning = (
             decode_timedelta_was_none
         )
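A boolean ``decode_timedelta`` is now expanded into a ``CFTimedeltaCoder`` with both decoding mechanisms switched together. A hedged sketch of passing a coder explicitly instead, assuming the keyword names shown in the hunk above (the file name is hypothetical):

import xarray as xr

coder = xr.coders.CFTimedeltaCoder(decode_via_units=True, decode_via_dtype=False)
ds = xr.open_dataset("data.nc", decode_timedelta=coder)  # finer control than True/False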
@@ -224,17 +226,16 @@ def decode_cf_variable(
                     DeprecationWarning,
                 )
             decode_times = CFDatetimeCoder(use_cftime=use_cftime)
-        else:
-            if use_cftime is not None:
-                raise TypeError(
-                    "Usage of 'use_cftime' as a kwarg is not allowed "
-                    "if a 'CFDatetimeCoder' instance is passed to "
-                    "'decode_times'. Please set 'use_cftime' "
-                    "when initializing 'CFDatetimeCoder' instead.\n"
-                    "Example usage:\n"
-                    "    time_coder = xr.coders.CFDatetimeCoder(use_cftime=True)\n"
-                    "    ds = xr.open_dataset(decode_times=time_coder)\n",
-                )
+        elif use_cftime is not None:
+            raise TypeError(
+                "Usage of 'use_cftime' as a kwarg is not allowed "
+                "if a 'CFDatetimeCoder' instance is passed to "
+                "'decode_times'. Please set 'use_cftime' "
+                "when initializing 'CFDatetimeCoder' instead.\n"
+                "Example usage:\n"
+                "    time_coder = xr.coders.CFDatetimeCoder(use_cftime=True)\n"
+                "    ds = xr.open_dataset(decode_times=time_coder)\n",
+            )
         var = decode_times.decode(var, name=name)
 
     if decode_endianness and not var.dtype.isnative:
@@ -247,7 +248,15 @@ def decode_cf_variable(
 
     encoding.setdefault("dtype", original_dtype)
 
-    if not is_duck_dask_array(data):
+    if (
+        # we don't need to lazily index duck arrays
+        not is_duck_array(data)
+        # These arrays already support lazy indexing
+        # OR for IndexingAdapters, it makes no sense to wrap them
+        and not isinstance(data, indexing.ExplicitlyIndexedNDArrayMixin)
+    ):
+        # this path applies to bare BackendArray objects.
+        # It is not hit for any internal Xarray backend
         data = indexing.LazilyIndexedArray(data)
 
     return Variable(dimensions, data, attributes, encoding=encoding, fastpath=True)
@@ -274,12 +283,11 @@ def _update_bounds_attributes(variables:
         attrs = v.attrs
         units = attrs.get("units")
         has_date_units = isinstance(units, str) and "since" in units
-        if has_date_units and "bounds" in attrs:
-            if attrs["bounds"] in variables:
-                bounds_attrs = variables[attrs["bounds"]].attrs
-                bounds_attrs.setdefault("units", attrs["units"])
-                if "calendar" in attrs:
-                    bounds_attrs.setdefault("calendar", attrs["calendar"])
+        if has_date_units and "bounds" in attrs and attrs["bounds"] in variables:
+            bounds_attrs = variables[attrs["bounds"]].attrs
+            bounds_attrs.setdefault("units", attrs["units"])
+            if "calendar" in attrs:
+                bounds_attrs.setdefault("calendar", attrs["calendar"])
 
 
 def _update_bounds_encoding(variables: T_Variables) -> None:
@@ -323,12 +331,11 @@ def _update_bounds_encoding(variables: T
                 f"{name} before writing to a file.",
             )
 
-        if has_date_units and "bounds" in attrs:
-            if attrs["bounds"] in variables:
-                bounds_encoding = variables[attrs["bounds"]].encoding
-                bounds_encoding.setdefault("units", encoding["units"])
-                if "calendar" in encoding:
-                    bounds_encoding.setdefault("calendar", encoding["calendar"])
+        if has_date_units and "bounds" in attrs and attrs["bounds"] in variables:
+            bounds_encoding = variables[attrs["bounds"]].encoding
+            bounds_encoding.setdefault("units", encoding["units"])
+            if "calendar" in encoding:
+                bounds_encoding.setdefault("calendar", encoding["calendar"])
 
 
 T = TypeVar("T")
@@ -407,7 +414,9 @@ def decode_cf_variables(
                 v,
                 concat_characters=_item_or_default(concat_characters, k, True),
                 mask_and_scale=_item_or_default(mask_and_scale, k, True),
-                decode_times=_item_or_default(decode_times, k, True),
+                decode_times=cast(
+                    bool | CFDatetimeCoder, _item_or_default(decode_times, k, True)
+                ),
                 stack_char_dim=stack_char_dim,
                 use_cftime=_item_or_default(use_cftime, k, None),
                 decode_timedelta=_item_or_default(decode_timedelta, k, None),
@@ -785,7 +794,13 @@ def cf_encoder(variables: T_Variables, a
     # add encoding for time bounds variables if present.
     _update_bounds_encoding(variables)
 
-    new_vars = {k: encode_cf_variable(v, name=k) for k, v in variables.items()}
+    new_vars = {}
+    for k, v in variables.items():
+        try:
+            new_vars[k] = encode_cf_variable(v, name=k)
+        except Exception as e:
+            e.add_note(f"Raised while encoding variable {k!r} with value {v!r}")
+            raise
 
     # Remove attrs from bounds variables (issue #2921)
     for var in new_vars.values():
@@ -803,8 +818,11 @@ def cf_encoder(variables: T_Variables, a
                 "leap_year",
                 "month_lengths",
             ]:
-                if attr in new_vars[bounds].attrs and attr in var.attrs:
-                    if new_vars[bounds].attrs[attr] == var.attrs[attr]:
-                        new_vars[bounds].attrs.pop(attr)
+                if (
+                    attr in new_vars[bounds].attrs
+                    and attr in var.attrs
+                    and new_vars[bounds].attrs[attr] == var.attrs[attr]
+                ):
+                    new_vars[bounds].attrs.pop(attr)
 
     return new_vars, attributes
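With the ``add_note`` change above, encoding failures now carry a note naming the offending variable. A minimal sketch, assuming an object-dtype variable that still cannot be serialized and Python >= 3.11 (where exception notes exist); the variable name is invented:

import xarray as xr
from xarray.conventions import cf_encoder

ds = xr.Dataset({"foo": ("x", [object(), object()])})  # not serializable to netCDF
try:
    cf_encoder(ds.variables, ds.attrs)
except Exception as err:
    print(getattr(err, "__notes__", []))  # e.g. ["Raised while encoding variable 'foo' ..."]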
diff -pruN 2025.03.1-8/xarray/convert.py 2025.10.1-1/xarray/convert.py
--- 2025.03.1-8/xarray/convert.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/convert.py	2025-10-10 10:38:05.000000000 +0000
@@ -138,7 +138,7 @@ def _iris_cell_methods_to_str(cell_metho
             f"interval: {interval}" for interval in cell_method.intervals
         )
         comments = " ".join(f"comment: {comment}" for comment in cell_method.comments)
-        extra = " ".join([intervals, comments]).strip()
+        extra = f"{intervals} {comments}".strip()
         if extra:
             extra = f" ({extra})"
         cell_methods.append(names + cell_method.method + extra)
diff -pruN 2025.03.1-8/xarray/core/_aggregations.py 2025.10.1-1/xarray/core/_aggregations.py
--- 2025.03.1-8/xarray/core/_aggregations.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/_aggregations.py	2025-10-10 10:38:05.000000000 +0000
@@ -1776,10 +1776,6 @@ class DatasetAggregations:
         :ref:`agg`
             User guide on reduction or aggregation operations.
 
-        Notes
-        -----
-        Non-numeric variables will be removed prior to reducing.
-
         Examples
         --------
         >>> da = xr.DataArray(
@@ -2948,10 +2944,6 @@ class DataArrayAggregations:
         :ref:`agg`
             User guide on reduction or aggregation operations.
 
-        Notes
-        -----
-        Non-numeric variables will be removed prior to reducing.
-
         Examples
         --------
         >>> da = xr.DataArray(
@@ -4231,8 +4223,6 @@ class DatasetGroupByAggregations:
         Pass flox-specific keyword arguments in ``**kwargs``.
         See the `flox documentation <https://flox.readthedocs.io>`_ for more.
 
-        Non-numeric variables will be removed prior to reducing.
-
         Examples
         --------
         >>> da = xr.DataArray(
@@ -5729,8 +5719,6 @@ class DatasetResampleAggregations:
         Pass flox-specific keyword arguments in ``**kwargs``.
         See the `flox documentation <https://flox.readthedocs.io>`_ for more.
 
-        Non-numeric variables will be removed prior to reducing.
-
         Examples
         --------
         >>> da = xr.DataArray(
@@ -7188,8 +7176,6 @@ class DataArrayGroupByAggregations:
         Pass flox-specific keyword arguments in ``**kwargs``.
         See the `flox documentation <https://flox.readthedocs.io>`_ for more.
 
-        Non-numeric variables will be removed prior to reducing.
-
         Examples
         --------
         >>> da = xr.DataArray(
@@ -8578,8 +8564,6 @@ class DataArrayResampleAggregations:
         Pass flox-specific keyword arguments in ``**kwargs``.
         See the `flox documentation <https://flox.readthedocs.io>`_ for more.
 
-        Non-numeric variables will be removed prior to reducing.
-
         Examples
         --------
         >>> da = xr.DataArray(
diff -pruN 2025.03.1-8/xarray/core/accessor_dt.py 2025.10.1-1/xarray/core/accessor_dt.py
--- 2025.03.1-8/xarray/core/accessor_dt.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/accessor_dt.py	2025-10-10 10:38:05.000000000 +0000
@@ -20,6 +20,8 @@ from xarray.core.variable import IndexVa
 from xarray.namedarray.utils import is_duck_dask_array
 
 if TYPE_CHECKING:
+    from typing import Self
+
     from numpy.typing import DTypeLike
 
     from xarray.core.dataarray import DataArray
@@ -204,7 +206,7 @@ def _strftime_through_cftimeindex(values
     values_as_cftimeindex = CFTimeIndex(duck_array_ops.ravel(values))
 
     field_values = values_as_cftimeindex.strftime(date_format)
-    return field_values.values.reshape(values.shape)
+    return field_values.to_numpy().reshape(values.shape)
 
 
 def _strftime_through_series(values, date_format: str):
@@ -213,7 +215,7 @@ def _strftime_through_series(values, dat
     """
     values_as_series = pd.Series(duck_array_ops.ravel(values), copy=False)
     strs = values_as_series.dt.strftime(date_format)
-    return strs.values.reshape(values.shape)
+    return strs.to_numpy().reshape(values.shape)
 
 
 def _strftime(values, date_format):
@@ -242,7 +244,7 @@ class TimeAccessor(Generic[T_DataArray])
     def __init__(self, obj: T_DataArray) -> None:
         self._obj = obj
 
-    def _date_field(self, name: str, dtype: DTypeLike) -> T_DataArray:
+    def _date_field(self, name: str, dtype: DTypeLike | None) -> T_DataArray:
         if dtype is None:
             dtype = self._obj.dtype
         result = _get_date_field(_index_or_data(self._obj), name, dtype)
@@ -650,7 +652,7 @@ class TimedeltaAccessor(TimeAccessor[T_D
 class CombinedDatetimelikeAccessor(
     DatetimeAccessor[T_DataArray], TimedeltaAccessor[T_DataArray]
 ):
-    def __new__(cls, obj: T_DataArray) -> CombinedDatetimelikeAccessor:
+    def __new__(cls, obj: T_DataArray) -> Self:
         # CombinedDatetimelikeAccessor isn't really instantiated. Instead
         # we need to choose which parent (datetime or timedelta) is
         # appropriate. Since we're checking the dtypes anyway, we'll just
diff -pruN 2025.03.1-8/xarray/core/accessor_str.py 2025.10.1-1/xarray/core/accessor_str.py
--- 2025.03.1-8/xarray/core/accessor_str.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/accessor_str.py	2025-10-10 10:38:05.000000000 +0000
@@ -112,7 +112,7 @@ def _apply_str_ufunc(
     *,
     func: Callable,
     obj: Any,
-    dtype: DTypeLike = None,
+    dtype: DTypeLike | None = None,
     output_core_dims: list | tuple = ((),),
     output_sizes: Mapping[Any, int] | None = None,
     func_args: tuple = (),
@@ -224,7 +224,7 @@ class StringAccessor(Generic[T_DataArray
         self,
         *,
         func: Callable,
-        dtype: DTypeLike = None,
+        dtype: DTypeLike | None = None,
         output_core_dims: list | tuple = ((),),
         output_sizes: Mapping[Any, int] | None = None,
         func_args: tuple = (),
@@ -349,7 +349,7 @@ class StringAccessor(Generic[T_DataArray
             islice = slice(-1, None) if iind == -1 else slice(iind, iind + 1)
             item = x[islice]
 
-            return item if item else default
+            return item or default
 
         return self._apply(func=f, func_args=(i,))
 
@@ -662,10 +662,11 @@ class StringAccessor(Generic[T_DataArray
         """
         args = tuple(self._stringify(x) for x in args)
         kwargs = {key: self._stringify(val) for key, val in kwargs.items()}
-        func = lambda x, *args, **kwargs: self._obj.dtype.type.format(
-            x, *args, **kwargs
+        return self._apply(
+            func=self._obj.dtype.type.format,
+            func_args=args,
+            func_kwargs={"kwargs": kwargs},
         )
-        return self._apply(func=func, func_args=args, func_kwargs={"kwargs": kwargs})
 
     def capitalize(self) -> T_DataArray:
         """
@@ -1944,7 +1945,7 @@ class StringAccessor(Generic[T_DataArray
         if regex:
             pat = self._re_compile(pat=pat, flags=flags, case=case)
             func = lambda x, ipat, irepl, i_n: ipat.sub(
-                repl=irepl, string=x, count=i_n if i_n >= 0 else 0
+                repl=irepl, string=x, count=max(i_n, 0)
             )
         else:
             pat = self._stringify(pat)
diff -pruN 2025.03.1-8/xarray/core/common.py 2025.10.1-1/xarray/core/common.py
--- 2025.03.1-8/xarray/core/common.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/common.py	2025-10-10 10:38:05.000000000 +0000
@@ -164,7 +164,7 @@ class AbstractArray:
         return complex(self.values)
 
     def __array__(
-        self: Any, dtype: np.typing.DTypeLike = None, /, *, copy: bool | None = None
+        self: Any, dtype: DTypeLike | None = None, /, *, copy: bool | None = None
     ) -> np.ndarray:
         if not copy:
             if np.lib.NumpyVersion(np.__version__) >= "2.0.0":
@@ -457,7 +457,7 @@ class DataWithCoords(AttrAccessMixin):
         numpy.squeeze
         """
         dims = get_squeeze_dims(self, dim, axis)
-        return self.isel(drop=drop, **{d: 0 for d in dims})
+        return self.isel(drop=drop, **dict.fromkeys(dims, 0))
 
     def clip(
         self,
@@ -619,9 +619,9 @@ class DataWithCoords(AttrAccessMixin):
         <xarray.Dataset> Size: 360B
         Dimensions:         (x: 2, y: 2, time: 4)
         Coordinates:
+          * time            (time) datetime64[ns] 32B 2014-09-06 ... 2014-09-09
             lon             (x, y) float64 32B 260.2 260.7 260.2 260.8
             lat             (x, y) float64 32B 42.25 42.21 42.63 42.59
-          * time            (time) datetime64[ns] 32B 2014-09-06 ... 2014-09-09
             reference_time  datetime64[ns] 8B 2014-09-05
         Dimensions without coordinates: x, y
         Data variables:
@@ -633,9 +633,9 @@ class DataWithCoords(AttrAccessMixin):
         <xarray.Dataset> Size: 360B
         Dimensions:         (x: 2, y: 2, time: 4)
         Coordinates:
+          * time            (time) datetime64[ns] 32B 2014-09-06 ... 2014-09-09
             lon             (x, y) float64 32B -99.83 -99.32 -99.79 -99.23
             lat             (x, y) float64 32B 42.25 42.21 42.63 42.59
-          * time            (time) datetime64[ns] 32B 2014-09-06 ... 2014-09-09
             reference_time  datetime64[ns] 8B 2014-09-05
         Dimensions without coordinates: x, y
         Data variables:
@@ -1118,7 +1118,7 @@ class DataWithCoords(AttrAccessMixin):
                 f"Received {type(freq)} instead."
             )
 
-        rgrouper = ResolvedGrouper(grouper, group, self, eagerly_compute_group=False)
+        rgrouper = ResolvedGrouper(grouper, group, self)
 
         return resample_cls(
             self,
@@ -1701,11 +1701,11 @@ def full_like(
 
     if isinstance(other, Dataset):
         if not isinstance(fill_value, dict):
-            fill_value = {k: fill_value for k in other.data_vars.keys()}
+            fill_value = dict.fromkeys(other.data_vars.keys(), fill_value)
 
         dtype_: Mapping[Any, DTypeLikeSave]
         if not isinstance(dtype, Mapping):
-            dtype_ = {k: dtype for k in other.data_vars.keys()}
+            dtype_ = dict.fromkeys(other.data_vars.keys(), dtype)
         else:
             dtype_ = dtype
 
@@ -1782,7 +1782,7 @@ def _full_like_variable(
             other.shape,
             fill_value,
             dtype=dtype,
-            chunks=chunks if chunks else other.data.chunks,
+            chunks=chunks or other.data.chunks,
             **from_array_kwargs,
         )
     else:
@@ -2073,18 +2073,18 @@ def get_chunksizes(
     return Frozen(chunks)
 
 
-def is_np_datetime_like(dtype: DTypeLike) -> bool:
+def is_np_datetime_like(dtype: DTypeLike | None) -> bool:
     """Check if a dtype is a subclass of the numpy datetime types"""
     return np.issubdtype(dtype, np.datetime64) or np.issubdtype(dtype, np.timedelta64)
 
 
-def is_np_timedelta_like(dtype: DTypeLike) -> bool:
+def is_np_timedelta_like(dtype: DTypeLike | None) -> bool:
     """Check whether dtype is of the timedelta64 dtype."""
     return np.issubdtype(dtype, np.timedelta64)
 
 
 def _contains_cftime_datetimes(array: Any) -> bool:
-    """Check if a array inside a Variable contains cftime.datetime objects"""
+    """Check if an array inside a Variable contains cftime.datetime objects"""
     if cftime is None:
         return False
 
@@ -2108,3 +2108,21 @@ def _contains_datetime_like_objects(var:
     np.datetime64, np.timedelta64, or cftime.datetime)
     """
     return is_np_datetime_like(var.dtype) or contains_cftime_datetimes(var)
+
+
+def _is_numeric_aggregatable_dtype(var: T_Variable) -> bool:
+    """Check if a variable's dtype can be used in numeric aggregations like mean().
+
+    This includes:
+    - Numeric types (int, float, complex)
+    - Boolean type
+    - Datetime types (datetime64, timedelta64)
+    - Object arrays containing datetime-like objects (e.g., cftime)
+    """
+    return (
+        np.issubdtype(var.dtype, np.number)
+        or (var.dtype == np.bool_)
+        or np.issubdtype(var.dtype, np.datetime64)
+        or np.issubdtype(var.dtype, np.timedelta64)
+        or _contains_cftime_datetimes(var._data)
+    )
diff -pruN 2025.03.1-8/xarray/core/coordinate_transform.py 2025.10.1-1/xarray/core/coordinate_transform.py
--- 2025.03.1-8/xarray/core/coordinate_transform.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/coordinate_transform.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,5 +1,7 @@
+from __future__ import annotations
+
 from collections.abc import Hashable, Iterable, Mapping
-from typing import Any
+from typing import Any, overload
 
 import numpy as np
 
@@ -7,8 +9,9 @@ import numpy as np
 class CoordinateTransform:
     """Abstract coordinate transform with dimension & coordinate names.
 
-    EXPERIMENTAL (not ready for public use yet).
-
+    .. caution::
+        This API is experimental and subject to change. Please report any bugs or surprising
+        behaviour you encounter.
     """
 
     coord_names: tuple[Hashable, ...]
@@ -64,8 +67,30 @@ class CoordinateTransform:
         """
         raise NotImplementedError
 
-    def equals(self, other: "CoordinateTransform") -> bool:
-        """Check equality with another CoordinateTransform of the same kind."""
+    @overload
+    def equals(self, other: CoordinateTransform) -> bool: ...
+
+    @overload
+    def equals(
+        self, other: CoordinateTransform, *, exclude: frozenset[Hashable] | None = None
+    ) -> bool: ...
+
+    def equals(self, other: CoordinateTransform, **kwargs) -> bool:
+        """Check equality with another CoordinateTransform of the same kind.
+
+        Parameters
+        ----------
+        other : CoordinateTransform
+            The other CoordinateTransform object to compare with this object.
+        exclude : frozenset of hashable, optional
+            Dimensions excluded from checking. It is None by default, (i.e.,
+            when this method is not called in the context of alignment). For a
+            n-dimensional transform this option allows a CoordinateTransform to
+            optionally ignore any dimension in ``exclude`` when comparing
+            ``self`` with ``other``. For a 1-dimensional transform this kwarg
+            can be safely ignored, as this method is not called when all of the
+            transform's dimensions are also excluded from alignment.
+        """
         raise NotImplementedError
 
     def generate_coords(
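A hedged sketch of what the new ``equals`` signature looks like on a subclass (the subclass is invented and only the override is shown); a 1-dimensional transform can simply accept and ignore ``exclude``:

from collections.abc import Hashable

from xarray.core.coordinate_transform import CoordinateTransform


class MyTransform(CoordinateTransform):
    def equals(
        self, other: CoordinateTransform, *, exclude: frozenset[Hashable] | None = None
    ) -> bool:
        # ``exclude`` may be ignored for a 1-D transform (see the docstring above)
        return isinstance(other, MyTransform)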
diff -pruN 2025.03.1-8/xarray/core/coordinates.py 2025.10.1-1/xarray/core/coordinates.py
--- 2025.03.1-8/xarray/core/coordinates.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/coordinates.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from collections.abc import Hashable, Iterator, Mapping, Sequence
+from collections.abc import Callable, Hashable, Iterable, Iterator, Mapping, Sequence
 from contextlib import contextmanager
 from typing import (
     TYPE_CHECKING,
@@ -21,7 +21,7 @@ from xarray.core.indexes import (
     assert_no_index_corrupted,
     create_default_index_implicit,
 )
-from xarray.core.types import DataVars, Self, T_DataArray, T_Xarray
+from xarray.core.types import DataVars, ErrorOptions, Self, T_DataArray, T_Xarray
 from xarray.core.utils import (
     Frozen,
     ReprObject,
@@ -177,13 +177,17 @@ class AbstractCoordinates(Mapping[Hashab
 
                 # compute the cartesian product
                 code_list += [
-                    np.tile(np.repeat(code, repeat_counts[i]), tile_counts[i]).tolist()
+                    np.tile(np.repeat(code, repeat_counts[i]), tile_counts[i])
                     for code in codes
                 ]
                 level_list += levels
                 names += index.names
 
-        return pd.MultiIndex(levels=level_list, codes=code_list, names=names)
+        return pd.MultiIndex(
+            levels=level_list,  # type: ignore[arg-type,unused-ignore]
+            codes=[list(c) for c in code_list],
+            names=names,
+        )
 
 
 class Coordinates(AbstractCoordinates):
@@ -309,7 +313,7 @@ class Coordinates(AbstractCoordinates):
                 var = as_variable(data, name=name, auto_convert=False)
                 if var.dims == (name,) and indexes is None:
                     index, index_vars = create_default_index_implicit(var, list(coords))
-                    default_indexes.update({k: index for k in index_vars})
+                    default_indexes.update(dict.fromkeys(index_vars, index))
                     variables.update(index_vars)
                 else:
                     variables[name] = var
@@ -384,7 +388,7 @@ class Coordinates(AbstractCoordinates):
                 f"create any coordinate.\n{index!r}"
             )
 
-        indexes = {name: index for name in variables}
+        indexes = dict.fromkeys(variables, index)
 
         return cls(coords=variables, indexes=indexes)
 
@@ -412,7 +416,7 @@ class Coordinates(AbstractCoordinates):
         xr_idx = PandasMultiIndex(midx, dim)
 
         variables = xr_idx.create_variables()
-        indexes = {k: xr_idx for k in variables}
+        indexes = dict.fromkeys(variables, xr_idx)
 
         return cls(coords=variables, indexes=indexes)
 
@@ -486,7 +490,7 @@ class Coordinates(AbstractCoordinates):
         return self.to_dataset().identical(other.to_dataset())
 
     def _update_coords(
-        self, coords: dict[Hashable, Variable], indexes: Mapping[Any, Index]
+        self, coords: dict[Hashable, Variable], indexes: dict[Hashable, Index]
     ) -> None:
         # redirect to DatasetCoordinates._update_coords
         self._data.coords._update_coords(coords, indexes)
@@ -561,6 +565,35 @@ class Coordinates(AbstractCoordinates):
             variables=coords, coord_names=coord_names, indexes=indexes
         )
 
+    def __or__(self, other: Mapping[Any, Any] | None) -> Coordinates:
+        """Merge two sets of coordinates to create a new Coordinates object
+
+        The method implements the logic used for joining coordinates in the
+        result of a binary operation performed on xarray objects:
+
+        - If two index coordinates conflict (are not equal), an exception is
+          raised. You must align your data before passing it to this method.
+        - If an index coordinate and a non-index coordinate conflict, the non-
+          index coordinate is dropped.
+        - If two non-index coordinates conflict, both are dropped.
+
+        Parameters
+        ----------
+        other : dict-like, optional
+            A :py:class:`Coordinates` object or any mapping that can be turned
+            into coordinates.
+
+        Returns
+        -------
+        merged : Coordinates
+            A new Coordinates object with merged coordinates.
+
+        See Also
+        --------
+        Coordinates.merge
+        """
+        return self.merge(other).coords
+
     def __setitem__(self, key: Hashable, value: Any) -> None:
         self.update({key: value})
 
@@ -719,6 +752,108 @@ class Coordinates(AbstractCoordinates):
             ),
         )
 
+    def drop_vars(
+        self,
+        names: str
+        | Iterable[Hashable]
+        | Callable[
+            [Coordinates | Dataset | DataArray | DataTree],
+            str | Iterable[Hashable],
+        ],
+        *,
+        errors: ErrorOptions = "raise",
+    ) -> Self:
+        """Drop variables from this Coordinates object.
+
+        Note that indexes that depend on these variables will also be dropped.
+
+        Parameters
+        ----------
+        names : hashable or iterable or callable
+            Name(s) of variables to drop. If a callable, this object is passed as its
+            only argument and its result is used.
+        errors : {"raise", "ignore"}, default: "raise"
+            Error treatment.
+
+            - ``'raise'``: raises a :py:class:`ValueError` if any of the variables
+              passed are not in the dataset
+            - ``'ignore'``: any given names that are in the dataset are dropped and no
+              error is raised.
+        """
+        return cast(Self, self.to_dataset().drop_vars(names, errors=errors).coords)
+
+    def drop_dims(
+        self,
+        drop_dims: str | Iterable[Hashable],
+        *,
+        errors: ErrorOptions = "raise",
+    ) -> Self:
+        """Drop dimensions and associated variables from this dataset.
+
+        Parameters
+        ----------
+        drop_dims : str or Iterable of Hashable
+            Dimension or dimensions to drop.
+        errors : {"raise", "ignore"}, default: "raise"
+            If 'raise', raises a ValueError if any of the
+            dimensions passed are not in the dataset. If 'ignore', any given
+            dimensions that are in the dataset are dropped and no error is raised.
+
+        Returns
+        -------
+        obj : Coordinates
+            Coordinates object without the given dimensions (or any coordinates
+            containing those dimensions).
+        """
+        return cast(Self, self.to_dataset().drop_dims(drop_dims, errors=errors).coords)
+
+    def rename_dims(
+        self,
+        dims_dict: Mapping[Any, Hashable] | None = None,
+        **dims: Hashable,
+    ) -> Self:
+        """Returns a new object with renamed dimensions only.
+
+        Parameters
+        ----------
+        dims_dict : dict-like, optional
+            Dictionary whose keys are current dimension names and
+            whose values are the desired names. The desired names must
+            not be the name of an existing dimension or Variable in the Coordinates.
+        **dims : optional
+            Keyword form of ``dims_dict``.
+            One of dims_dict or dims must be provided.
+
+        Returns
+        -------
+        renamed : Coordinates
+            Coordinates object with renamed dimensions.
+        """
+        return cast(Self, self.to_dataset().rename_dims(dims_dict, **dims).coords)
+
+    def rename_vars(
+        self,
+        name_dict: Mapping[Any, Hashable] | None = None,
+        **names: Hashable,
+    ) -> Coordinates:
+        """Returns a new object with renamed variables.
+
+        Parameters
+        ----------
+        name_dict : dict-like, optional
+            Dictionary whose keys are current variable or coordinate names and
+            whose values are the desired names.
+        **names : optional
+            Keyword form of ``name_dict``.
+            One of name_dict or names must be provided.
+
+        Returns
+        -------
+        renamed : Coordinates
+            Coordinates object with renamed variables
+        """
+        return cast(Self, self.to_dataset().rename_vars(name_dict, **names).coords)
+
 
 class DatasetCoordinates(Coordinates):
     """Dictionary like container for Dataset coordinates (variables + indexes).
@@ -780,7 +915,7 @@ class DatasetCoordinates(Coordinates):
         return self._data._copy_listed(names)
 
     def _update_coords(
-        self, coords: dict[Hashable, Variable], indexes: Mapping[Any, Index]
+        self, coords: dict[Hashable, Variable], indexes: dict[Hashable, Index]
     ) -> None:
         variables = self._data._variables.copy()
         variables.update(coords)
@@ -788,7 +923,7 @@ class DatasetCoordinates(Coordinates):
         # check for inconsistent state *before* modifying anything in-place
         dims = calculate_dimensions(variables)
         new_coord_names = set(coords)
-        for dim in dims.keys():
+        for dim in dims:
             if dim in variables:
                 new_coord_names.add(dim)
 
@@ -880,7 +1015,7 @@ class DataTreeCoordinates(Coordinates):
         return self._data.dataset._copy_listed(self._names)
 
     def _update_coords(
-        self, coords: dict[Hashable, Variable], indexes: Mapping[Any, Index]
+        self, coords: dict[Hashable, Variable], indexes: dict[Hashable, Index]
     ) -> None:
         from xarray.core.datatree import check_alignment
 
@@ -964,22 +1099,14 @@ class DataArrayCoordinates(Coordinates,
         return self._data._getitem_coord(key)
 
     def _update_coords(
-        self, coords: dict[Hashable, Variable], indexes: Mapping[Any, Index]
+        self, coords: dict[Hashable, Variable], indexes: dict[Hashable, Index]
     ) -> None:
-        coords_plus_data = coords.copy()
-        coords_plus_data[_THIS_ARRAY] = self._data.variable
-        dims = calculate_dimensions(coords_plus_data)
-        if not set(dims) <= set(self.dims):
-            raise ValueError(
-                "cannot add coordinates with new dimensions to a DataArray"
-            )
-        self._data._coords = coords
+        validate_dataarray_coords(
+            self._data.shape, Coordinates._construct_direct(coords, indexes), self.dims
+        )
 
-        # TODO(shoyer): once ._indexes is always populated by a dict, modify
-        # it to update inplace instead.
-        original_indexes = dict(self._data.xindexes)
-        original_indexes.update(indexes)
-        self._data._indexes = original_indexes
+        self._data._coords = coords
+        self._data._indexes = indexes
 
     def _drop_coords(self, coord_names):
         # should drop indexed coordinates only
@@ -1035,10 +1162,10 @@ def drop_indexed_coords(
         if isinstance(idx, PandasMultiIndex) and idx_drop_coords == {idx.dim}:
             idx_drop_coords.update(idx.index.names)
             emit_user_level_warning(
-                f"updating coordinate {idx.dim!r} with a PandasMultiIndex would leave "
+                f"updating coordinate {idx.dim!r}, which is a PandasMultiIndex, would leave "
                 f"the multi-index level coordinates {list(idx.index.names)!r} in an inconsistent state. "
-                f"This will raise an error in the future. Use `.drop_vars({list(idx_coords)!r})` before "
-                "assigning new coordinate values.",
+                f"This will raise an error in the future. Use `.drop_vars({list(idx_coords)!r})` "
+                "to drop the coordinates' values before assigning new coordinate values.",
                 FutureWarning,
             )
 
@@ -1134,7 +1261,7 @@ def create_coords_with_default_indexes(
             # pandas multi-index edge cases.
             variable = variable.to_index_variable()
             idx, idx_vars = create_default_index_implicit(variable, all_variables)
-            indexes.update({k: idx for k in idx_vars})
+            indexes.update(dict.fromkeys(idx_vars, idx))
             variables.update(idx_vars)
             all_variables.update(idx_vars)
         else:
@@ -1154,12 +1281,61 @@ def create_coords_with_default_indexes(
     return new_coords
 
 
-def _coordinates_from_variable(variable: Variable) -> Coordinates:
-    from xarray.core.indexes import create_default_index_implicit
+class CoordinateValidationError(ValueError):
+    """Error class for Xarray coordinate validation failures."""
+
+
+def validate_dataarray_coords(
+    shape: tuple[int, ...],
+    coords: Coordinates | Mapping[Hashable, Variable],
+    dim: tuple[Hashable, ...],
+):
+    """Validate coordinates ``coords`` to include in a DataArray defined by
+    ``shape`` and dimensions ``dim``.
+
+    If a coordinate is associated with an index, the validation is performed by
+    the index. By default the coordinate dimensions must match (a subset of) the
+    array dimensions (in any order) to conform to the DataArray model. The index
+    may override this behavior with other validation rules, though.
+
+    Non-index coordinates must all conform to the DataArray model. Scalar
+    coordinates are always valid.
+    """
+    sizes = dict(zip(dim, shape, strict=True))
+    dim_set = set(dim)
+
+    indexes: Mapping[Hashable, Index]
+    if isinstance(coords, Coordinates):
+        indexes = coords.xindexes
+    else:
+        indexes = {}
+
+    for k, v in coords.items():
+        if k in indexes:
+            invalid = not indexes[k].should_add_coord_to_array(k, v, dim_set)
+        else:
+            invalid = any(d not in dim for d in v.dims)
+
+        if invalid:
+            raise CoordinateValidationError(
+                f"coordinate {k} has dimensions {v.dims}, but these "
+                "are not a subset of the DataArray "
+                f"dimensions {dim}"
+            )
+
+        for d, s in v.sizes.items():
+            if d in sizes and s != sizes[d]:
+                raise CoordinateValidationError(
+                    f"conflicting sizes for dimension {d!r}: "
+                    f"length {sizes[d]} on the data but length {s} on "
+                    f"coordinate {k!r}"
+                )
+
 
+def coordinates_from_variable(variable: Variable) -> Coordinates:
     (name,) = variable.dims
     new_index, index_vars = create_default_index_implicit(variable)
-    indexes = {k: new_index for k in index_vars}
+    indexes = dict.fromkeys(index_vars, new_index)
     new_vars = new_index.create_variables()
     new_vars[name].attrs = variable.attrs
     return Coordinates(new_vars, indexes)
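For context, a brief sketch of the validation consolidated above: constructing a DataArray with a coordinate whose dimensions are not a subset of the array's dimensions now raises CoordinateValidationError, a ValueError subclass (shapes and names invented):

import numpy as np
import xarray as xr

try:
    xr.DataArray(np.zeros(2), dims="x", coords={"y": ("y", [1, 2, 3])})
except ValueError as err:  # CoordinateValidationError subclasses ValueError
    print(err)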
diff -pruN 2025.03.1-8/xarray/core/dataarray.py 2025.10.1-1/xarray/core/dataarray.py
--- 2025.03.1-8/xarray/core/dataarray.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/dataarray.py	2025-10-10 10:38:05.000000000 +0000
@@ -14,15 +14,7 @@ from collections.abc import (
 from functools import partial
 from os import PathLike
 from types import EllipsisType
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Generic,
-    Literal,
-    NoReturn,
-    TypeVar,
-    overload,
-)
+from typing import TYPE_CHECKING, Any, Generic, Literal, NoReturn, TypeVar, overload
 
 import numpy as np
 import pandas as pd
@@ -41,6 +33,7 @@ from xarray.core.coordinates import (
     DataArrayCoordinates,
     assert_coordinate_consistent,
     create_coords_with_default_indexes,
+    validate_dataarray_coords,
 )
 from xarray.core.dataset import Dataset
 from xarray.core.extension_array import PandasExtensionArray
@@ -132,25 +125,6 @@ if TYPE_CHECKING:
     T_XarrayOther = TypeVar("T_XarrayOther", bound="DataArray" | Dataset)
 
 
-def _check_coords_dims(shape, coords, dim):
-    sizes = dict(zip(dim, shape, strict=True))
-    for k, v in coords.items():
-        if any(d not in dim for d in v.dims):
-            raise ValueError(
-                f"coordinate {k} has dimensions {v.dims}, but these "
-                "are not a subset of the DataArray "
-                f"dimensions {dim}"
-            )
-
-        for d, s in v.sizes.items():
-            if s != sizes[d]:
-                raise ValueError(
-                    f"conflicting sizes for dimension {d!r}: "
-                    f"length {sizes[d]} on the data but length {s} on "
-                    f"coordinate {k!r}"
-                )
-
-
 def _infer_coords_and_dims(
     shape: tuple[int, ...],
     coords: (
@@ -214,7 +188,7 @@ def _infer_coords_and_dims(
                 var.dims = (dim,)
                 new_coords[dim] = var.to_index_variable()
 
-    _check_coords_dims(shape, new_coords, dims_tuple)
+    validate_dataarray_coords(shape, new_coords, dims_tuple)
 
     return new_coords, dims_tuple
 
@@ -353,7 +327,7 @@ class DataArray(
         Attributes to assign to the new instance. By default, an empty
         attribute dictionary is initialized.
         (see FAQ, :ref:`approach to metadata`)
-    indexes : py:class:`~xarray.Indexes` or dict-like, optional
+    indexes : :py:class:`~xarray.Indexes` or dict-like, optional
         For internal use only. For passing indexes objects to the
         new DataArray, use the ``coords`` argument instead with a
         :py:class:`~xarray.Coordinate` object (both coordinate variables
@@ -394,9 +368,9 @@ class DataArray(
            [[22.60070734, 13.78914233, 14.17424919],
             [18.28478802, 16.15234857, 26.63418806]]])
     Coordinates:
+      * time            (time) datetime64[ns] 24B 2014-09-06 2014-09-07 2014-09-08
         lon             (x, y) float64 32B -99.83 -99.32 -99.79 -99.23
         lat             (x, y) float64 32B 42.25 42.21 42.63 42.59
-      * time            (time) datetime64[ns] 24B 2014-09-06 2014-09-07 2014-09-08
         reference_time  datetime64[ns] 8B 2014-09-05
     Dimensions without coordinates: x, y
     Attributes:
@@ -524,7 +498,7 @@ class DataArray(
         self,
         variable: Variable | None = None,
         coords=None,
-        name: Hashable | None | Default = _default,
+        name: Hashable | Default | None = _default,
         attrs=_default,
         indexes=None,
     ) -> Self:
@@ -546,7 +520,7 @@ class DataArray(
     def _replace_maybe_drop_dims(
         self,
         variable: Variable,
-        name: Hashable | None | Default = _default,
+        name: Hashable | Default | None = _default,
     ) -> Self:
         if self.sizes == variable.sizes:
             coords = self._coords.copy()
@@ -607,7 +581,7 @@ class DataArray(
         return self._to_dataset_whole(name=_THIS_ARRAY, shallow_copy=False)
 
     def _from_temp_dataset(
-        self, dataset: Dataset, name: Hashable | None | Default = _default
+        self, dataset: Dataset, name: Hashable | Default | None = _default
     ) -> Self:
         variable = dataset._variables.pop(_THIS_ARRAY)
         coords = dataset._variables
@@ -1161,10 +1135,11 @@ class DataArray(
         return cls(variable, coords, name=name, indexes=indexes, fastpath=True)
 
     def load(self, **kwargs) -> Self:
-        """Manually trigger loading of this array's data from disk or a
-        remote source into memory and return this array.
+        """Trigger loading data into memory and return this dataarray.
+
+        Data will be computed and/or loaded from disk or a remote source.
 
-        Unlike compute, the original dataset is modified and returned.
+        Unlike ``.compute``, the original dataarray is modified and returned.
 
         Normally, it should not be necessary to call this method in user code,
         because all xarray functions should either work on deferred data or
@@ -1176,9 +1151,18 @@ class DataArray(
         **kwargs : dict
             Additional keyword arguments passed on to ``dask.compute``.
 
+        Returns
+        -------
+        object : DataArray
+            Same object but with lazy data and coordinates as in-memory arrays.
+
         See Also
         --------
         dask.compute
+        DataArray.load_async
+        DataArray.compute
+        Dataset.load
+        Variable.load
         """
         ds = self._to_temp_dataset().load(**kwargs)
         new = self._from_temp_dataset(ds)
@@ -1186,11 +1170,49 @@ class DataArray(
         self._coords = new._coords
         return self
 
+    async def load_async(self, **kwargs) -> Self:
+        """Trigger and await asynchronous loading of data into memory and return this dataarray.
+
+        Data will be computed and/or loaded from disk or a remote source.
+
+        Unlike ``.compute``, the original dataarray is modified and returned.
+
+        This only works when opening data lazily from IO storage backends that support lazy asynchronous loading.
+        Otherwise it will raise a NotImplementedError.
+
+        Note that users are expected to limit concurrency themselves; xarray does not internally limit concurrency in any way.
+
+        Parameters
+        ----------
+        **kwargs : dict
+            Additional keyword arguments passed on to ``dask.compute``.
+
+        Returns
+        -------
+        object : DataArray
+            Same object but with lazy data and coordinates as in-memory arrays.
+
+        See Also
+        --------
+        dask.compute
+        DataArray.compute
+        DataArray.load
+        Dataset.load_async
+        Variable.load_async
+        """
+        temp_ds = self._to_temp_dataset()
+        ds = await temp_ds.load_async(**kwargs)
+        new = self._from_temp_dataset(ds)
+        self._variable = new._variable
+        self._coords = new._coords
+        return self
+
     def compute(self, **kwargs) -> Self:
-        """Manually trigger loading of this array's data from disk or a
-        remote source into memory and return a new array.
+        """Trigger loading data into memory and return a new dataarray.
 
-        Unlike load, the original is left unaltered.
+        Data will be computed and/or loaded from disk or a remote source.
+
+        Unlike ``.load``, the original dataarray is left unaltered.
 
         Normally, it should not be necessary to call this method in user code,
         because all xarray functions should either work on deferred data or
@@ -1210,6 +1232,10 @@ class DataArray(
         See Also
         --------
         dask.compute
+        DataArray.load
+        DataArray.load_async
+        Dataset.compute
+        Variable.compute
         """
         new = self.copy(deep=False)
         return new.load(**kwargs)
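A hedged sketch of the new ``load_async`` entry point; the store name and variable are hypothetical, and the backend must support asynchronous loading (e.g. a recent zarr store):

import asyncio

import xarray as xr


async def main() -> None:
    da = xr.open_dataset("store.zarr", engine="zarr")["temperature"]
    await da.load_async()  # awaits the backend's asynchronous reads


asyncio.run(main())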
@@ -2113,8 +2139,8 @@ class DataArray(
         <xarray.DataArray (y: 3)> Size: 24B
         array([3, 4, 5])
         Coordinates:
-            x        int64 8B 20
           * y        (y) int64 24B 70 80 90
+            x        int64 8B 20
 
         ...so ``b`` in not added here:
 
@@ -2122,8 +2148,8 @@ class DataArray(
         <xarray.DataArray (y: 3)> Size: 24B
         array([3, 4, 5])
         Coordinates:
-            x        int64 8B 20
           * y        (y) int64 24B 70 80 90
+            x        int64 8B 20
 
         See Also
         --------
@@ -2337,8 +2363,8 @@ class DataArray(
                [3.  ,  nan, 5.75,  nan],
                [5.  ,  nan, 5.25,  nan]])
         Coordinates:
-          * y        (y) int64 32B 10 12 14 16
           * x        (x) float64 32B 0.0 0.75 1.25 1.75
+          * y        (y) int64 32B 10 12 14 16
 
         1D nearest interpolation:
 
@@ -2349,8 +2375,8 @@ class DataArray(
                [ 2.,  7.,  6., nan],
                [ 6., nan,  5.,  8.]])
         Coordinates:
-          * y        (y) int64 32B 10 12 14 16
           * x        (x) float64 32B 0.0 0.75 1.25 1.75
+          * y        (y) int64 32B 10 12 14 16
 
         1D linear extrapolation:
 
@@ -2365,8 +2391,8 @@ class DataArray(
                [ 8. ,  nan,  4.5,  nan],
                [12. ,  nan,  3.5,  nan]])
         Coordinates:
-          * y        (y) int64 32B 10 12 14 16
           * x        (x) float64 32B 1.0 1.5 2.5 3.5
+          * y        (y) int64 32B 10 12 14 16
 
         2D linear interpolation:
 
@@ -2613,8 +2639,8 @@ class DataArray(
         <xarray.DataArray (y: 2)> Size: 16B
         array([0, 1])
         Coordinates:
-            x        (y) <U1 8B 'a' 'b'
           * y        (y) int64 16B 0 1
+            x        (y) <U1 8B 'a' 'b'
 
         >>> arr.swap_dims({"x": "z"})
         <xarray.DataArray (z: 2)> Size: 16B
@@ -2635,8 +2661,8 @@ class DataArray(
 
     def expand_dims(
         self,
-        dim: None | Hashable | Sequence[Hashable] | Mapping[Any, Any] = None,
-        axis: None | int | Sequence[int] = None,
+        dim: Hashable | Sequence[Hashable] | Mapping[Any, Any] | None = None,
+        axis: int | Sequence[int] | None = None,
         create_index_for_new_dim: bool = True,
         **dim_kwargs: Any,
     ) -> Self:
@@ -4041,7 +4067,7 @@ class DataArray(
         compute: bool = True,
         invalid_netcdf: bool = False,
         auto_complex: bool | None = None,
-    ) -> bytes: ...
+    ) -> memoryview: ...
 
     # compute=False returns dask.Delayed
     @overload
@@ -4105,17 +4131,15 @@ class DataArray(
         compute: bool = True,
         invalid_netcdf: bool = False,
         auto_complex: bool | None = None,
-    ) -> bytes | Delayed | None:
+    ) -> memoryview | Delayed | None:
         """Write DataArray contents to a netCDF file.
 
         Parameters
         ----------
-        path : str, path-like or None, optional
-            Path to which to save this dataset. File-like objects are only
-            supported by the scipy engine. If no path is provided, this
-            function returns the resulting netCDF file as bytes; in this case,
-            we need to use scipy, which does not support netCDF version 4 (the
-            default format becomes NETCDF3_64BIT).
+        path : str, path-like, file-like or None, optional
+            Path to which to save this dataarray, or a file-like object to write
+            it to (which must be seekable and support both reading and writing), or
+            None (default) to return the in-memory netCDF bytes as a memoryview.
         mode : {"w", "a"}, default: "w"
             Write ('w') or append ('a') mode. If mode='w', any existing file at
             this location will be overwritten. If mode='a', existing variables
@@ -4144,10 +4168,11 @@ class DataArray(
         group : str, optional
             Path to the netCDF4 group in the given file to open (only works for
             format='NETCDF4'). The group(s) will be created if necessary.
-        engine : {"netcdf4", "scipy", "h5netcdf"}, optional
+        engine : {"netcdf4", "h5netcdf", "scipy"}, optional
             Engine to use when writing netCDF files. If not provided, the
-            default engine is chosen based on available dependencies, with a
-            preference for 'netcdf4' if writing to a file on disk.
+            default engine is chosen based on available dependencies, by default
+            preferring "netcdf4" over "h5netcdf" over "scipy" (customizable via
+            ``netcdf_engine_order`` in ``xarray.set_options()``).
         encoding : dict, optional
             Nested dictionary with variable names as keys and dictionaries of
             variable specific encodings as values, e.g.,
@@ -4175,8 +4200,7 @@ class DataArray(
 
         Returns
         -------
-        store: bytes or Delayed or None
-            * ``bytes`` if path is None
+            * ``memoryview`` if path is None
             * ``dask.delayed.Delayed`` if compute is False
             * None otherwise
 
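For reference, a minimal sketch of the in-memory return path (assumes an engine that supports in-memory writing, such as h5netcdf or scipy, is installed; names are illustrative):

import io

import numpy as np
import xarray as xr

da = xr.DataArray(np.arange(3.0), dims="x", name="a")

buf = da.to_netcdf()  # path=None -> the serialized netCDF file as a memoryview
roundtrip = xr.load_dataarray(io.BytesIO(bytes(buf)))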
@@ -4195,7 +4219,8 @@ class DataArray(
         --------
         Dataset.to_netcdf
         """
-        from xarray.backends.api import DATAARRAY_NAME, DATAARRAY_VARIABLE, to_netcdf
+        from xarray.backends.api import DATAARRAY_NAME, DATAARRAY_VARIABLE
+        from xarray.backends.writers import to_netcdf
 
         if self.name is None:
             # If no name is set then use a generic xarray name
@@ -4240,6 +4265,7 @@ class DataArray(
         append_dim: Hashable | None = None,
         region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None,
         safe_chunks: bool = True,
+        align_chunks: bool = False,
         storage_options: dict[str, str] | None = None,
         zarr_version: int | None = None,
         zarr_format: int | None = None,
@@ -4263,6 +4289,7 @@ class DataArray(
         append_dim: Hashable | None = None,
         region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None,
         safe_chunks: bool = True,
+        align_chunks: bool = False,
         storage_options: dict[str, str] | None = None,
         zarr_version: int | None = None,
         zarr_format: int | None = None,
@@ -4284,6 +4311,7 @@ class DataArray(
         append_dim: Hashable | None = None,
         region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None,
         safe_chunks: bool = True,
+        align_chunks: bool = False,
         storage_options: dict[str, str] | None = None,
         zarr_version: int | None = None,
         zarr_format: int | None = None,
@@ -4385,6 +4413,16 @@ class DataArray(
             two or more chunked arrays in the same location in parallel if they are
             not writing in independent regions, for those cases it is better to use
             a synchronizer.
+        align_chunks : bool, default: False
+            If True, rechunks the Dask array to align with Zarr chunks before writing.
+            This ensures each Dask chunk maps to one or more contiguous Zarr chunks,
+            which avoids race conditions.
+            Internally, the process sets safe_chunks=False and tries to preserve
+            the original Dask chunking as much as possible.
+            Note: While this alignment avoids write conflicts stemming from chunk
+            boundary misalignment, it does not protect against race conditions
+            if multiple uncoordinated processes write to the same
+            Zarr array concurrently.
         storage_options : dict, optional
             Any additional parameters for the storage backend (ignored for local
             paths).
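For reference, a minimal sketch of writing with aligned chunks (store path, chunk sizes and encoding are illustrative; requires dask and zarr):

import numpy as np
import xarray as xr

da = xr.DataArray(np.arange(100.0), dims="time", name="a").chunk({"time": 30})

# Dask chunks of 30 do not line up with the requested Zarr chunks of 25;
# align_chunks=True rechunks the dask array so each dask chunk covers
# whole Zarr chunks before writing.
da.to_zarr("aligned.zarr", encoding={"a": {"chunks": (25,)}}, align_chunks=True)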
@@ -4449,7 +4487,8 @@ class DataArray(
         :ref:`io.zarr`
             The I/O user guide, with more details and examples.
         """
-        from xarray.backends.api import DATAARRAY_NAME, DATAARRAY_VARIABLE, to_zarr
+        from xarray.backends.api import DATAARRAY_NAME, DATAARRAY_VARIABLE
+        from xarray.backends.writers import to_zarr
 
         if self.name is None:
             # If no name is set then use a generic xarray name
@@ -4476,6 +4515,7 @@ class DataArray(
             append_dim=append_dim,
             region=region,
             safe_chunks=safe_chunks,
+            align_chunks=align_chunks,
             storage_options=storage_options,
             zarr_version=zarr_version,
             zarr_format=zarr_format,
@@ -4812,8 +4852,8 @@ class DataArray(
         except (TypeError, AttributeError):
             return False
 
-    def __array_wrap__(self, obj, context=None) -> Self:
-        new_var = self.variable.__array_wrap__(obj, context)
+    def __array_wrap__(self, obj, context=None, return_scalar=False) -> Self:
+        new_var = self.variable.__array_wrap__(obj, context, return_scalar)
         return self._replace(new_var)
 
     def __matmul__(self, obj: T_Xarray) -> T_Xarray:
@@ -5329,8 +5369,8 @@ class DataArray(
                [3.6 , 5.75, 6.  , 1.7 ],
                [6.5 , 7.3 , 9.4 , 1.9 ]])
         Coordinates:
-          * y         (y) float64 32B 1.0 1.5 2.0 2.5
           * quantile  (quantile) float64 24B 0.0 0.5 1.0
+          * y         (y) float64 32B 1.0 1.5 2.0 2.5
 
         References
         ----------
@@ -5472,7 +5512,7 @@ class DataArray(
         ----------
         coord : Hashable, or sequence of Hashable
             Coordinate(s) used for the integration.
-        datetime_unit : {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', \
+        datetime_unit : {'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', \
                         'ps', 'fs', 'as', None}, optional
             Specify the unit if a datetime coordinate is used.
 
@@ -5529,7 +5569,7 @@ class DataArray(
         ----------
         coord : Hashable, or sequence of Hashable
             Coordinate(s) used for the integration.
-        datetime_unit : {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', \
+        datetime_unit : {'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', \
                         'ps', 'fs', 'as', None}, optional
             Specify the unit if a datetime coordinate is used.
 
@@ -6418,7 +6458,7 @@ class DataArray(
         """
         Curve fitting optimization for arbitrary functions.
 
-        Wraps `scipy.optimize.curve_fit` with `apply_ufunc`.
+        Wraps :py:func:`scipy.optimize.curve_fit` with :py:func:`~xarray.apply_ufunc`.
 
         Parameters
         ----------
@@ -6558,6 +6598,9 @@ class DataArray(
         --------
         DataArray.polyfit
         scipy.optimize.curve_fit
+        xarray.DataArray.xlm.modelfit
+            External method from `xarray-lmfit <https://xarray-lmfit.readthedocs.io/>`_
+            with more curve fitting functionality.
         """
         # For DataArray, use the original implementation by converting to a dataset first
         return self._to_temp_dataset().curvefit(
@@ -6813,7 +6856,7 @@ class DataArray(
         *,
         squeeze: Literal[False] = False,
         restore_coord_dims: bool = False,
-        eagerly_compute_group: bool = True,
+        eagerly_compute_group: Literal[False] | None = None,
         **groupers: Grouper,
     ) -> DataArrayGroupBy:
         """Returns a DataArrayGroupBy object for performing grouped operations.
@@ -6829,11 +6872,8 @@ class DataArray(
         restore_coord_dims : bool, default: False
             If True, also restore the dimension order of multi-dimensional
             coordinates.
-        eagerly_compute_group: bool
-            Whether to eagerly compute ``group`` when it is a chunked array.
-            This option is to maintain backwards compatibility. Set to False
-            to opt-in to future behaviour, where ``group`` is not automatically loaded
-            into memory.
+        eagerly_compute_group : bool, optional
+            This argument is deprecated.
         **groupers : Mapping of str to Grouper or Resampler
             Mapping of variable name to group by to :py:class:`Grouper` or :py:class:`Resampler` object.
             One of ``group`` or ``groupers`` must be provided.
@@ -6857,12 +6897,12 @@ class DataArray(
         >>> da
         <xarray.DataArray (time: 1827)> Size: 15kB
         array([0.000e+00, 1.000e+00, 2.000e+00, ..., 1.824e+03, 1.825e+03,
-               1.826e+03])
+               1.826e+03], shape=(1827,))
         Coordinates:
           * time     (time) datetime64[ns] 15kB 2000-01-01 2000-01-02 ... 2004-12-31
         >>> da.groupby("time.dayofyear") - da.groupby("time.dayofyear").mean("time")
         <xarray.DataArray (time: 1827)> Size: 15kB
-        array([-730.8, -730.8, -730.8, ...,  730.2,  730.2,  730.5])
+        array([-730.8, -730.8, -730.8, ...,  730.2,  730.2,  730.5], shape=(1827,))
         Coordinates:
           * time       (time) datetime64[ns] 15kB 2000-01-01 2000-01-02 ... 2004-12-31
             dayofyear  (time) int64 15kB 1 2 3 4 5 6 7 8 ... 360 361 362 363 364 365 366
@@ -6886,7 +6926,7 @@ class DataArray(
 
         >>> da.groupby("letters")
         <DataArrayGroupBy, grouped over 1 grouper(s), 2 groups in total:
-            'letters': 2/2 groups present with labels 'a', 'b'>
+            'letters': UniqueGrouper('letters'), 2/2 groups with labels 'a', 'b'>
 
         Execute a reduction
 
@@ -6902,8 +6942,8 @@ class DataArray(
 
         >>> da.groupby(["letters", "x"])
         <DataArrayGroupBy, grouped over 2 grouper(s), 8 groups in total:
-            'letters': 2/2 groups present with labels 'a', 'b'
-            'x': 4/4 groups present with labels 10, 20, 30, 40>
+            'letters': UniqueGrouper('letters'), 2/2 groups with labels 'a', 'b'
+            'x': UniqueGrouper('x'), 4/4 groups with labels 10, 20, 30, 40>
 
         Use Grouper objects to express more complicated GroupBy operations
 
@@ -6917,7 +6957,7 @@ class DataArray(
                [[nan, nan, nan],
                 [ 3.,  4.,  5.]]])
         Coordinates:
-          * x_bins   (x_bins) object 16B (5, 15] (15, 25]
+          * x_bins   (x_bins) interval[int64, right] 32B (5, 15] (15, 25]
           * letters  (letters) object 16B 'a' 'b'
         Dimensions without coordinates: y
 
@@ -6927,7 +6967,7 @@ class DataArray(
         :ref:`groupby`
             Users guide explanation of how to group and bin data.
 
-        :doc:`xarray-tutorial:intermediate/01-high-level-computation-patterns`
+        :doc:`xarray-tutorial:intermediate/computation/01-high-level-computation-patterns`
             Tutorial on :py:func:`~xarray.DataArray.groupby` for windowed computation
 
         :doc:`xarray-tutorial:fundamentals/03.2_groupby_with_xarray`
@@ -6965,7 +7005,7 @@ class DataArray(
         squeeze: Literal[False] = False,
         restore_coord_dims: bool = False,
         duplicates: Literal["raise", "drop"] = "raise",
-        eagerly_compute_group: bool = True,
+        eagerly_compute_group: Literal[False] | None = None,
     ) -> DataArrayGroupBy:
         """Returns a DataArrayGroupBy object for performing grouped operations.
 
@@ -7002,11 +7042,8 @@ class DataArray(
             coordinates.
         duplicates : {"raise", "drop"}, default: "raise"
             If bin edges are not unique, raise ValueError or drop non-uniques.
-        eagerly_compute_group: bool
-            Whether to eagerly compute ``group`` when it is a chunked array.
-            This option is to maintain backwards compatibility. Set to False
-            to opt-in to future behaviour, where ``group`` is not automatically loaded
-            into memory.
+        eagerly_compute_group : bool, optional
+            This argument is deprecated.
 
         Returns
         -------
@@ -7072,13 +7109,13 @@ class DataArray(
 
         Returns
         -------
-        core.weighted.DataArrayWeighted
+        computation.weighted.DataArrayWeighted
 
         See Also
         --------
         :func:`Dataset.weighted <Dataset.weighted>`
 
-        :ref:`comput.weighted`
+        :ref:`compute.weighted`
             User guide on weighted array reduction using :py:func:`~xarray.DataArray.weighted`
 
         :doc:`xarray-tutorial:fundamentals/03.4_weighted`
@@ -7117,7 +7154,7 @@ class DataArray(
 
         Returns
         -------
-        core.rolling.DataArrayRolling
+        computation.rolling.DataArrayRolling
 
         Examples
         --------
@@ -7157,7 +7194,7 @@ class DataArray(
         --------
         DataArray.cumulative
         Dataset.rolling
-        core.rolling.DataArrayRolling
+        computation.rolling.DataArrayRolling
         """
         from xarray.computation.rolling import DataArrayRolling
 
@@ -7183,7 +7220,7 @@ class DataArray(
 
         Returns
         -------
-        core.rolling.DataArrayRolling
+        computation.rolling.DataArrayRolling
 
         Examples
         --------
@@ -7217,7 +7254,7 @@ class DataArray(
         --------
         DataArray.rolling
         Dataset.cumulative
-        core.rolling.DataArrayRolling
+        computation.rolling.DataArrayRolling
         """
         from xarray.computation.rolling import DataArrayRolling
 
@@ -7265,7 +7302,7 @@ class DataArray(
 
         Returns
         -------
-        core.rolling.DataArrayCoarsen
+        computation.rolling.DataArrayCoarsen
 
         Examples
         --------
@@ -7360,7 +7397,7 @@ class DataArray(
 
         See Also
         --------
-        :class:`core.rolling.DataArrayCoarsen <core.rolling.DataArrayCoarsen>`
+        :class:`computation.rolling.DataArrayCoarsen <computation.rolling.DataArrayCoarsen>`
         :func:`Dataset.coarsen <Dataset.coarsen>`
 
         :ref:`reshape.coarsen`
diff -pruN 2025.03.1-8/xarray/core/dataset.py 2025.10.1-1/xarray/core/dataset.py
--- 2025.03.1-8/xarray/core/dataset.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/dataset.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,7 +1,9 @@
 from __future__ import annotations
 
+import asyncio
 import copy
 import datetime
+import io
 import math
 import sys
 import warnings
@@ -26,7 +28,6 @@ from typing import IO, TYPE_CHECKING, An
 
 import numpy as np
 import pandas as pd
-from pandas.api.types import is_extension_array_dtype
 
 from xarray.coding.calendar_ops import convert_calendar, interp_calendar
 from xarray.coding.cftimeindex import CFTimeIndex, _parse_array_of_cftime_strings
@@ -34,16 +35,12 @@ from xarray.compat.array_api_compat impo
 from xarray.computation import ops
 from xarray.computation.arithmetic import DatasetArithmetic
 from xarray.core import dtypes as xrdtypes
-from xarray.core import (
-    duck_array_ops,
-    formatting,
-    formatting_html,
-    utils,
-)
+from xarray.core import duck_array_ops, formatting, formatting_html, utils
 from xarray.core._aggregations import DatasetAggregations
 from xarray.core.common import (
     DataWithCoords,
     _contains_datetime_like_objects,
+    _is_numeric_aggregatable_dtype,
     get_chunksizes,
 )
 from xarray.core.coordinates import (
@@ -91,6 +88,7 @@ from xarray.core.utils import (
     either_dict_or_kwargs,
     emit_user_level_warning,
     infix_dims,
+    is_allowed_extension_array,
     is_dict_like,
     is_duck_array,
     is_duck_dask_array,
@@ -99,6 +97,7 @@ from xarray.core.utils import (
     parse_dims_as_set,
 )
 from xarray.core.variable import (
+    UNSUPPORTED_EXTENSION_ARRAY_TYPES,
     IndexVariable,
     Variable,
     as_variable,
@@ -121,7 +120,13 @@ from xarray.structure.merge import (
     merge_coordinates_without_align,
     merge_data_and_coords,
 )
-from xarray.util.deprecation_helpers import _deprecate_positional_args, deprecate_dims
+from xarray.util.deprecation_helpers import (
+    _COMPAT_DEFAULT,
+    _JOIN_DEFAULT,
+    CombineKwargDefault,
+    _deprecate_positional_args,
+    deprecate_dims,
+)
 
 if TYPE_CHECKING:
     from dask.dataframe import DataFrame as DaskDataFrame
@@ -310,10 +315,10 @@ class Dataset(
     <xarray.Dataset> Size: 552B
     Dimensions:         (loc: 2, instrument: 3, time: 4)
     Coordinates:
-        lon             (loc) float64 16B -99.83 -99.32
-        lat             (loc) float64 16B 42.25 42.21
       * instrument      (instrument) <U8 96B 'manufac1' 'manufac2' 'manufac3'
       * time            (time) datetime64[ns] 32B 2014-09-06 ... 2014-09-09
+        lon             (loc) float64 16B -99.83 -99.32
+        lat             (loc) float64 16B 42.25 42.21
         reference_time  datetime64[ns] 8B 2014-09-05
     Dimensions without coordinates: loc
     Data variables:
@@ -512,9 +517,11 @@ class Dataset(
         )
 
     def load(self, **kwargs) -> Self:
-        """Manually trigger loading and/or computation of this dataset's data
-        from disk or a remote source into memory and return this dataset.
-        Unlike compute, the original dataset is modified and returned.
+        """Trigger loading data into memory and return this dataset.
+
+        Data will be computed and/or loaded from disk or a remote source.
+
+        Unlike ``.compute``, the original dataset is modified and returned.
 
         Normally, it should not be necessary to call this method in user code,
         because all xarray functions should either work on deferred data or
@@ -526,29 +533,93 @@ class Dataset(
         **kwargs : dict
             Additional keyword arguments passed on to ``dask.compute``.
 
+        Returns
+        -------
+        object : Dataset
+            The same object, but with its previously lazy data variables and
+            coordinates loaded as in-memory arrays.
+
         See Also
         --------
         dask.compute
+        Dataset.compute
+        Dataset.load_async
+        DataArray.load
+        Variable.load
         """
         # access .data to coerce everything to numpy or dask arrays
-        lazy_data = {
+        chunked_data = {
             k: v._data for k, v in self.variables.items() if is_chunked_array(v._data)
         }
-        if lazy_data:
-            chunkmanager = get_chunked_array_type(*lazy_data.values())
+        if chunked_data:
+            chunkmanager = get_chunked_array_type(*chunked_data.values())
 
             # evaluate all the chunked arrays simultaneously
             evaluated_data: tuple[np.ndarray[Any, Any], ...] = chunkmanager.compute(
-                *lazy_data.values(), **kwargs
+                *chunked_data.values(), **kwargs
             )
 
-            for k, data in zip(lazy_data, evaluated_data, strict=False):
+            for k, data in zip(chunked_data, evaluated_data, strict=False):
                 self.variables[k].data = data
 
         # load everything else sequentially
-        for k, v in self.variables.items():
-            if k not in lazy_data:
-                v.load()
+        [v.load() for k, v in self.variables.items() if k not in chunked_data]
+
+        return self
+
+    async def load_async(self, **kwargs) -> Self:
+        """Trigger and await asynchronous loading of data into memory and return this dataset.
+
+        Data will be computed and/or loaded from disk or a remote source.
+
+        Unlike ``.compute``, the original dataset is modified and returned.
+
+        This only works when data has been opened lazily from an IO storage backend
+        that supports asynchronous loading; otherwise a NotImplementedError is raised.
+
+        Note that users are expected to limit concurrency themselves; xarray does not
+        limit concurrency internally in any way.
+
+        Parameters
+        ----------
+        **kwargs : dict
+            Additional keyword arguments passed on to ``dask.compute``.
+
+        Returns
+        -------
+        object : Dataset
+            The same object, but with its previously lazy data variables and
+            coordinates loaded as in-memory arrays.
+
+        See Also
+        --------
+        dask.compute
+        Dataset.compute
+        Dataset.load
+        DataArray.load_async
+        Variable.load_async
+        """
+        # TODO refactor this to pull out the common chunked_data codepath
+
+        # this blocks on chunked arrays but not on lazily indexed arrays
+
+        # access .data to coerce everything to numpy or dask arrays
+        chunked_data = {
+            k: v._data for k, v in self.variables.items() if is_chunked_array(v._data)
+        }
+        if chunked_data:
+            chunkmanager = get_chunked_array_type(*chunked_data.values())
+
+            # evaluate all the chunked arrays simultaneously
+            evaluated_data: tuple[np.ndarray[Any, Any], ...] = chunkmanager.compute(
+                *chunked_data.values(), **kwargs
+            )
+
+            for k, data in zip(chunked_data, evaluated_data, strict=False):
+                self.variables[k].data = data
+
+        # load everything else concurrently
+        coros = [
+            v.load_async() for k, v in self.variables.items() if k not in chunked_data
+        ]
+        await asyncio.gather(*coros)
 
         return self
 
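For reference, a minimal usage sketch, assuming a lazily opened store whose backend supports asynchronous loading (otherwise ``load_async`` raises ``NotImplementedError``); the store path is illustrative:

import asyncio

import xarray as xr


async def main() -> xr.Dataset:
    # chunks=None keeps lazily indexed (non-dask) arrays, which is the async path
    ds = xr.open_zarr("data.zarr", chunks=None)
    await ds.load_async()  # variables are loaded concurrently via asyncio.gather
    return ds


ds = asyncio.run(main())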
@@ -687,9 +758,11 @@ class Dataset(
         )
 
     def compute(self, **kwargs) -> Self:
-        """Manually trigger loading and/or computation of this dataset's data
-        from disk or a remote source into memory and return a new dataset.
-        Unlike load, the original dataset is left unaltered.
+        """Trigger loading data into memory and return a new dataset.
+
+        Data will be computed and/or loaded from disk or a remote source.
+
+        Unlike ``.load``, the original dataset is left unaltered.
 
         Normally, it should not be necessary to call this method in user code,
         because all xarray functions should either work on deferred data or
@@ -709,6 +782,10 @@ class Dataset(
         See Also
         --------
         dask.compute
+        Dataset.load
+        Dataset.load_async
+        DataArray.compute
+        Variable.compute
         """
         new = self.copy(deep=False)
         return new.load(**kwargs)
@@ -790,9 +867,9 @@ class Dataset(
         variables: dict[Hashable, Variable] | None = None,
         coord_names: set[Hashable] | None = None,
         dims: dict[Any, int] | None = None,
-        attrs: dict[Hashable, Any] | None | Default = _default,
+        attrs: dict[Hashable, Any] | Default | None = _default,
         indexes: dict[Hashable, Index] | None = None,
-        encoding: dict | None | Default = _default,
+        encoding: dict | Default | None = _default,
         inplace: bool = False,
     ) -> Self:
         """Fastpath constructor for internal use.
@@ -839,7 +916,7 @@ class Dataset(
         self,
         variables: dict[Hashable, Variable],
         coord_names: set | None = None,
-        attrs: dict[Hashable, Any] | None | Default = _default,
+        attrs: dict[Hashable, Any] | Default | None = _default,
         indexes: dict[Hashable, Index] | None = None,
         inplace: bool = False,
     ) -> Self:
@@ -854,7 +931,7 @@ class Dataset(
         variables: dict[Hashable, Variable],
         coord_names: set | None = None,
         dims: dict[Hashable, int] | None = None,
-        attrs: dict[Hashable, Any] | None | Default = _default,
+        attrs: dict[Hashable, Any] | Default | None = _default,
         inplace: bool = False,
     ) -> Self:
         """Deprecated version of _replace_with_new_dims().
@@ -1122,7 +1199,7 @@ class Dataset(
                     coord_names.add(var_name)
                 if (var_name,) == var.dims:
                     index, index_vars = create_default_index_implicit(var, names)
-                    indexes.update({k: index for k in index_vars})
+                    indexes.update(dict.fromkeys(index_vars, index))
                     variables.update(index_vars)
                     coord_names.update(index_vars)
 
@@ -1159,7 +1236,15 @@ class Dataset(
         coords: dict[Hashable, Variable] = {}
         # preserve ordering
         for k in self._variables:
-            if k in self._coord_names and set(self._variables[k].dims) <= needed_dims:
+            if k in self._indexes:
+                add_coord = self._indexes[k].should_add_coord_to_array(
+                    k, self._variables[k], needed_dims
+                )
+            else:
+                var_dims = set(self._variables[k].dims)
+                add_coord = k in self._coord_names and var_dims <= needed_dims
+
+            if add_coord:
                 coords[k] = self._variables[k]
 
         indexes = filter_indexes_from_coords(self._indexes, set(coords))
@@ -1350,7 +1435,6 @@ class Dataset(
         to avoid leaving the dataset in a partially updated state when an error occurs.
         """
         from xarray.core.dataarray import DataArray
-        from xarray.structure.alignment import align
 
         if isinstance(value, Dataset):
             missing_vars = [
@@ -1722,8 +1806,8 @@ class Dataset(
         <xarray.Dataset> Size: 48B
         Dimensions:   (time: 3)
         Coordinates:
-            pressure  (time) float64 24B 1.013 1.2 3.5
           * time      (time) datetime64[ns] 24B 2023-01-01 2023-01-02 2023-01-03
+            pressure  (time) float64 24B 1.013 1.2 3.5
         Data variables:
             *empty*
 
@@ -1850,7 +1934,7 @@ class Dataset(
 
     def dump_to_store(self, store: AbstractDataStore, **kwargs) -> None:
         """Store dataset contents to a backends.*DataStore object."""
-        from xarray.backends.api import dump_to_store
+        from xarray.backends.writers import dump_to_store
 
         # TODO: rename and/or cleanup this method to make it more consistent
         # with to_netcdf()
@@ -1870,7 +1954,7 @@ class Dataset(
         compute: bool = True,
         invalid_netcdf: bool = False,
         auto_complex: bool | None = None,
-    ) -> bytes: ...
+    ) -> memoryview: ...
 
     # compute=False returns dask.Delayed
     @overload
@@ -1893,7 +1977,7 @@ class Dataset(
     @overload
     def to_netcdf(
         self,
-        path: str | PathLike,
+        path: str | PathLike | io.IOBase,
         mode: NetcdfWriteModes = "w",
         format: T_NetcdfTypes | None = None,
         group: str | None = None,
@@ -1924,7 +2008,7 @@ class Dataset(
 
     def to_netcdf(
         self,
-        path: str | PathLike | None = None,
+        path: str | PathLike | io.IOBase | None = None,
         mode: NetcdfWriteModes = "w",
         format: T_NetcdfTypes | None = None,
         group: str | None = None,
@@ -1934,17 +2018,15 @@ class Dataset(
         compute: bool = True,
         invalid_netcdf: bool = False,
         auto_complex: bool | None = None,
-    ) -> bytes | Delayed | None:
+    ) -> memoryview | Delayed | None:
         """Write dataset contents to a netCDF file.
 
         Parameters
         ----------
-        path : str, path-like or file-like, optional
-            Path to which to save this dataset. File-like objects are only
-            supported by the scipy engine. If no path is provided, this
-            function returns the resulting netCDF file as bytes; in this case,
-            we need to use scipy, which does not support netCDF version 4 (the
-            default format becomes NETCDF3_64BIT).
+        path : str, path-like, file-like or None, optional
+            Path to which to save this dataset, or a file-like object to write
+            it to (which must be seekable and support both reading and writing), or
+            None (default) to return the in-memory netCDF bytes as a memoryview.
         mode : {"w", "a"}, default: "w"
             Write ('w') or append ('a') mode. If mode='w', any existing file at
             this location will be overwritten. If mode='a', existing variables
@@ -1973,10 +2055,11 @@ class Dataset(
         group : str, optional
             Path to the netCDF4 group in the given file to open (only works for
             format='NETCDF4'). The group(s) will be created if necessary.
-        engine : {"netcdf4", "scipy", "h5netcdf"}, optional
+        engine : {"netcdf4", "h5netcdf", "scipy"}, optional
             Engine to use when writing netCDF files. If not provided, the
-            default engine is chosen based on available dependencies, with a
-            preference for 'netcdf4' if writing to a file on disk.
+            default engine is chosen based on available dependencies, by default
+            preferring "netcdf4" over "h5netcdf" over "scipy" (customizable via
+            ``netcdf_engine_order`` in ``xarray.set_options()``).
         encoding : dict, optional
             Nested dictionary with variable names as keys and dictionaries of
             variable specific encodings as values, e.g.,
@@ -2006,9 +2089,9 @@ class Dataset(
 
         Returns
         -------
-            * ``bytes`` if path is None
+            * ``memoryview`` if path is None
             * ``dask.delayed.Delayed`` if compute is False
-            * None otherwise
+            * ``None`` otherwise
 
         See Also
         --------
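For reference, a sketch of overriding the default engine preference; the exact value format accepted by ``netcdf_engine_order`` is assumed here to be an ordered sequence of engine names:

import numpy as np
import xarray as xr

ds = xr.Dataset({"a": ("x", np.arange(3.0))})

# assumed option value: try h5netcdf first, then fall back to netcdf4, then scipy
with xr.set_options(netcdf_engine_order=["h5netcdf", "netcdf4", "scipy"]):
    ds.to_netcdf("out.nc")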
@@ -2016,7 +2099,7 @@ class Dataset(
         """
         if encoding is None:
             encoding = {}
-        from xarray.backends.api import to_netcdf
+        from xarray.backends.writers import to_netcdf
 
         return to_netcdf(  # type: ignore[return-value]  # mypy cannot resolve the overloads:(
             self,
@@ -2049,6 +2132,7 @@ class Dataset(
         append_dim: Hashable | None = None,
         region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None,
         safe_chunks: bool = True,
+        align_chunks: bool = False,
         storage_options: dict[str, str] | None = None,
         zarr_version: int | None = None,
         zarr_format: int | None = None,
@@ -2072,6 +2156,7 @@ class Dataset(
         append_dim: Hashable | None = None,
         region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None,
         safe_chunks: bool = True,
+        align_chunks: bool = False,
         storage_options: dict[str, str] | None = None,
         zarr_version: int | None = None,
         zarr_format: int | None = None,
@@ -2093,6 +2178,7 @@ class Dataset(
         append_dim: Hashable | None = None,
         region: Mapping[str, slice | Literal["auto"]] | Literal["auto"] | None = None,
         safe_chunks: bool = True,
+        align_chunks: bool = False,
         storage_options: dict[str, str] | None = None,
         zarr_version: int | None = None,
         zarr_format: int | None = None,
@@ -2202,6 +2288,16 @@ class Dataset(
             two or more chunked arrays in the same location in parallel if they are
             not writing in independent regions, for those cases it is better to use
             a synchronizer.
+        align_chunks : bool, default: False
+            If True, rechunks the Dask array to align with Zarr chunks before writing.
+            This ensures each Dask chunk maps to one or more contiguous Zarr chunks,
+            which avoids race conditions.
+            Internally, the process sets safe_chunks=False and tries to preserve
+            the original Dask chunking as much as possible.
+            Note: While this alignment avoids write conflicts stemming from chunk
+            boundary misalignment, it does not protect against race conditions
+            if multiple uncoordinated processes write to the same
+            Zarr array concurrently.
         storage_options : dict, optional
             Any additional parameters for the storage backend (ignored for local
             paths).
@@ -2254,10 +2350,15 @@ class Dataset(
             used. Override any existing encodings by providing the ``encoding`` kwarg.
 
         ``fill_value`` handling:
-            There exists a subtlety in interpreting zarr's ``fill_value`` property. For zarr v2 format
-            arrays, ``fill_value`` is *always* interpreted as an invalid value similar to the ``_FillValue`` attribute
-            in CF/netCDF. For Zarr v3 format arrays, only an explicit ``_FillValue`` attribute will be used
-            to mask the data if requested using ``mask_and_scale=True``. See this `Github issue <https://github.com/pydata/xarray/issues/5475>`_
+            There exists a subtlety in interpreting zarr's ``fill_value`` property.
+            For Zarr v2 format arrays, ``fill_value`` is *always* interpreted as an
+            invalid value similar to the ``_FillValue`` attribute in CF/netCDF.
+            For Zarr v3 format arrays, only an explicit ``_FillValue`` attribute
+            will be used to mask the data if requested using ``mask_and_scale=True``.
+            To customize the fill value Zarr uses as a default for unwritten
+            chunks on disk, set ``_FillValue`` in encoding for Zarr v2 or
+            ``fill_value`` for Zarr v3.
+            See this `Github issue <https://github.com/pydata/xarray/issues/5475>`_
             for more.
 
         See Also
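For reference, a minimal sketch of customizing the on-disk fill value as described above (store paths and the value are illustrative; requires zarr):

import numpy as np
import xarray as xr

ds = xr.Dataset({"a": ("x", np.arange(4.0))})

# Zarr v2: _FillValue also serves as the default value of unwritten chunks
ds.to_zarr("out_v2.zarr", zarr_format=2, encoding={"a": {"_FillValue": -9999.0}})

# Zarr v3: use the separate "fill_value" encoding key instead
ds.to_zarr("out_v3.zarr", zarr_format=3, encoding={"a": {"fill_value": -9999.0}})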
@@ -2265,7 +2366,7 @@ class Dataset(
         :ref:`io.zarr`
             The I/O user guide, with more details and examples.
         """
-        from xarray.backends.api import to_zarr
+        from xarray.backends.writers import to_zarr
 
         return to_zarr(  # type: ignore[call-overload,misc]
             self,
@@ -2281,6 +2382,7 @@ class Dataset(
             append_dim=append_dim,
             region=region,
             safe_chunks=safe_chunks,
+            align_chunks=align_chunks,
             zarr_version=zarr_version,
             zarr_format=zarr_format,
             write_empty_chunks=write_empty_chunks,
@@ -2312,9 +2414,10 @@ class Dataset(
         if buf is None:  # pragma: no cover
             buf = sys.stdout
 
-        lines = []
-        lines.append("xarray.Dataset {")
-        lines.append("dimensions:")
+        lines = [
+            "xarray.Dataset {",
+            "dimensions:",
+        ]
         for name, size in self.sizes.items():
             lines.append(f"\t{name} = {size} ;")
         lines.append("\nvariables:")
@@ -2389,13 +2492,16 @@ class Dataset(
         sizes along that dimension will not be updated; non-dask arrays will be
         converted into dask arrays with a single block.
 
-        Along datetime-like dimensions, a :py:class:`groupers.TimeResampler` object is also accepted.
+        Along datetime-like dimensions, a :py:class:`Resampler` object
+        (e.g. :py:class:`groupers.TimeResampler` or :py:class:`groupers.SeasonResampler`)
+        is also accepted.
 
         Parameters
         ----------
-        chunks : int, tuple of int, "auto" or mapping of hashable to int or a TimeResampler, optional
+        chunks : int, tuple of int, "auto" or mapping of hashable to int or a Resampler, optional
             Chunk sizes along each dimension, e.g., ``5``, ``"auto"``, or
-            ``{"x": 5, "y": 5}`` or ``{"x": 5, "time": TimeResampler(freq="YE")}``.
+            ``{"x": 5, "y": 5}`` or ``{"x": 5, "time": TimeResampler(freq="YE")}`` or
+            ``{"time": SeasonResampler(["DJF", "MAM", "JJA", "SON"])}``.
         name_prefix : str, default: "xarray-"
             Prefix for the name of any new dask arrays.
         token : str, optional
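For reference, a minimal sketch of resampler-based chunking along a datetime dimension (requires dask):

import numpy as np
import pandas as pd
import xarray as xr
from xarray.groupers import SeasonResampler, TimeResampler

time = pd.date_range("2001-01-01", periods=730, freq="D")
ds = xr.Dataset({"a": ("time", np.arange(time.size))}, coords={"time": time})

yearly = ds.chunk({"time": TimeResampler(freq="YE")})  # one chunk per year
seasonal = ds.chunk(time=SeasonResampler(["DJF", "MAM", "JJA", "SON"]))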
@@ -2430,8 +2536,7 @@ class Dataset(
         xarray.unify_chunks
         dask.array.from_array
         """
-        from xarray.core.dataarray import DataArray
-        from xarray.groupers import TimeResampler
+        from xarray.groupers import Resampler
 
         if chunks is None and not chunks_kwargs:
             warnings.warn(
@@ -2459,41 +2564,29 @@ class Dataset(
                 f"chunks keys {tuple(bad_dims)} not found in data dimensions {tuple(self.sizes.keys())}"
             )
 
-        def _resolve_frequency(
-            name: Hashable, resampler: TimeResampler
-        ) -> tuple[int, ...]:
+        def _resolve_resampler(name: Hashable, resampler: Resampler) -> tuple[int, ...]:
             variable = self._variables.get(name, None)
             if variable is None:
                 raise ValueError(
-                    f"Cannot chunk by resampler {resampler!r} for virtual variables."
+                    f"Cannot chunk by resampler {resampler!r} for virtual variable {name!r}."
                 )
-            elif not _contains_datetime_like_objects(variable):
+            if variable.ndim != 1:
                 raise ValueError(
-                    f"chunks={resampler!r} only supported for datetime variables. "
-                    f"Received variable {name!r} with dtype {variable.dtype!r} instead."
+                    f"chunks={resampler!r} only supported for 1D variables. "
+                    f"Received variable {name!r} with {variable.ndim} dimensions instead."
                 )
-
-            assert variable.ndim == 1
-            chunks = (
-                DataArray(
-                    np.ones(variable.shape, dtype=int),
-                    dims=(name,),
-                    coords={name: variable},
-                )
-                .resample({name: resampler})
-                .sum()
-            )
-            # When bins (binning) or time periods are missing (resampling)
-            # we can end up with NaNs. Drop them.
-            if chunks.dtype.kind == "f":
-                chunks = chunks.dropna(name).astype(int)
-            chunks_tuple: tuple[int, ...] = tuple(chunks.data.tolist())
-            return chunks_tuple
+            newchunks = resampler.compute_chunks(variable, dim=name)
+            if sum(newchunks) != variable.shape[0]:
+                raise ValueError(
+                    f"Logic bug in rechunking variable {name!r} using {resampler!r}. "
+                    "New chunks tuple does not match size of data. Please open an issue."
+                )
+            return newchunks
 
         chunks_mapping_ints: Mapping[Any, T_ChunkDim] = {
             name: (
-                _resolve_frequency(name, chunks)
-                if isinstance(chunks, TimeResampler)
+                _resolve_resampler(name, chunks)
+                if isinstance(chunks, Resampler)
                 else chunks
             )
             for name, chunks in chunks_mapping.items()
@@ -2528,14 +2621,13 @@ class Dataset(
         + string indexers are cast to the appropriate date type if the
           associated index is a DatetimeIndex or CFTimeIndex
         """
-        from xarray.coding.cftimeindex import CFTimeIndex
         from xarray.core.dataarray import DataArray
 
         indexers = drop_dims_from_indexers(indexers, self.dims, missing_dims)
 
         # all indexers should be int, slice, np.ndarrays, or Variable
         for k, v in indexers.items():
-            if isinstance(v, int | slice | Variable):
+            if isinstance(v, int | slice | Variable) and not isinstance(v, bool):
                 yield k, v
             elif isinstance(v, DataArray):
                 yield k, v.variable
@@ -2896,9 +2988,8 @@ class Dataset(
             for k, v in query_results.variables.items():
                 if v.dims:
                     no_scalar_variables[k] = v
-                else:
-                    if k in self._coord_names:
-                        query_results.drop_coords.append(k)
+                elif k in self._coord_names:
+                    query_results.drop_coords.append(k)
             query_results.variables = no_scalar_variables
 
         result = self.isel(indexers=query_results.dim_indexers, drop=drop)
@@ -3012,7 +3103,7 @@ class Dataset(
             if not isinstance(indexers, int) and not is_dict_like(indexers):
                 raise TypeError("indexers must be either dict-like or a single integer")
         if isinstance(indexers, int):
-            indexers = {dim: indexers for dim in self.dims}
+            indexers = dict.fromkeys(self.dims, indexers)
         indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "head")
         for k, v in indexers.items():
             if not isinstance(v, int):
@@ -3100,7 +3191,7 @@ class Dataset(
             if not isinstance(indexers, int) and not is_dict_like(indexers):
                 raise TypeError("indexers must be either dict-like or a single integer")
         if isinstance(indexers, int):
-            indexers = {dim: indexers for dim in self.dims}
+            indexers = dict.fromkeys(self.dims, indexers)
         indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "tail")
         for k, v in indexers.items():
             if not isinstance(v, int):
@@ -3186,7 +3277,7 @@ class Dataset(
         ):
             raise TypeError("indexers must be either dict-like or a single integer")
         if isinstance(indexers, int):
-            indexers = {dim: indexers for dim in self.dims}
+            indexers = dict.fromkeys(self.dims, indexers)
         indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "thin")
         for k, v in indexers.items():
             if not isinstance(v, int):
@@ -3714,8 +3805,8 @@ class Dataset(
         <xarray.Dataset> Size: 224B
         Dimensions:  (x: 4, y: 4)
         Coordinates:
-          * y        (y) int64 32B 10 12 14 16
           * x        (x) float64 32B 0.0 0.75 1.25 1.75
+          * y        (y) int64 32B 10 12 14 16
         Data variables:
             a        (x) float64 32B 5.0 6.5 6.25 4.75
             b        (x, y) float64 128B 1.0 4.0 2.0 nan 1.75 ... nan 5.0 nan 5.25 nan
@@ -3726,8 +3817,8 @@ class Dataset(
         <xarray.Dataset> Size: 224B
         Dimensions:  (x: 4, y: 4)
         Coordinates:
-          * y        (y) int64 32B 10 12 14 16
           * x        (x) float64 32B 0.0 0.75 1.25 1.75
+          * y        (y) int64 32B 10 12 14 16
         Data variables:
             a        (x) float64 32B 5.0 7.0 7.0 4.0
             b        (x, y) float64 128B 1.0 4.0 2.0 9.0 2.0 7.0 ... nan 6.0 nan 5.0 8.0
@@ -3742,8 +3833,8 @@ class Dataset(
         <xarray.Dataset> Size: 224B
         Dimensions:  (x: 4, y: 4)
         Coordinates:
-          * y        (y) int64 32B 10 12 14 16
           * x        (x) float64 32B 1.0 1.5 2.5 3.5
+          * y        (y) int64 32B 10 12 14 16
         Data variables:
             a        (x) float64 32B 7.0 5.5 2.5 -0.5
             b        (x, y) float64 128B 2.0 7.0 6.0 nan 4.0 ... nan 12.0 nan 3.5 nan
@@ -3797,15 +3888,16 @@ class Dataset(
             for k, v in indexers.items()
         }
 
+        # optimization: subset to coordinate range of the target index
+        if method in ["linear", "nearest"]:
+            for k, v in validated_indexers.items():
+                obj, newidx = missing._localize(obj, {k: v})
+                validated_indexers[k] = newidx[k]
+
         has_chunked_array = bool(
             any(is_chunked_array(v._data) for v in obj._variables.values())
         )
         if has_chunked_array:
-            # optimization: subset to coordinate range of the target index
-            if method in ["linear", "nearest"]:
-                for k, v in validated_indexers.items():
-                    obj, newidx = missing._localize(obj, {k: v})
-                    validated_indexers[k] = newidx[k]
             # optimization: create dask coordinate arrays once per Dataset
             # rather than once per Variable when dask.array.unify_chunks is called later
             # GH4739
@@ -3821,7 +3913,7 @@ class Dataset(
                 continue
 
             use_indexers = (
-                dask_indexers if is_duck_dask_array(var.data) else validated_indexers
+                dask_indexers if is_duck_dask_array(var._data) else validated_indexers
             )
 
             dtype_kind = var.dtype.kind
@@ -3830,13 +3922,22 @@ class Dataset(
                 var_indexers = {k: v for k, v in use_indexers.items() if k in var.dims}
                 variables[name] = missing.interp(var, var_indexers, method, **kwargs)
             elif dtype_kind in "ObU" and (use_indexers.keys() & var.dims):
-                # For types that we do not understand do stepwise
-                # interpolation to avoid modifying the elements.
-                # reindex the variable instead because it supports
-                # booleans and objects and retains the dtype but inside
-                # this loop there might be some duplicate code that slows it
-                # down, therefore collect these signals and run it later:
-                reindex_vars.append(name)
+                if all(var.sizes[d] == 1 for d in (use_indexers.keys() & var.dims)):
+                    # Broadcastable, can be handled quickly without reindex:
+                    to_broadcast = (var.squeeze(),) + tuple(
+                        dest for _, dest in use_indexers.values()
+                    )
+                    variables[name] = broadcast_variables(*to_broadcast)[0].copy(
+                        deep=True
+                    )
+                else:
+                    # For types that we do not understand do stepwise
+                    # interpolation to avoid modifying the elements.
+                    # reindex the variable instead because it supports
+                    # booleans and objects and retains the dtype but inside
+                    # this loop there might be some duplicate code that slows it
+                    # down, therefore collect these signals and run it later:
+                    reindex_vars.append(name)
             elif all(d not in indexers for d in var.dims):
                 # For anything else we can only keep variables if they
                 # are not dependent on any coords that are being
@@ -4029,7 +4130,7 @@ class Dataset(
         for index, coord_names in self.xindexes.group_by_index():
             new_index = index.rename(name_dict, dims_dict)
             new_coord_names = [name_dict.get(k, k) for k in coord_names]
-            indexes.update({k: new_index for k in new_coord_names})
+            indexes.update(dict.fromkeys(new_coord_names, new_index))
             new_index_vars = new_index.create_variables(
                 {
                     new: self._variables[old]
@@ -4257,8 +4358,8 @@ class Dataset(
         <xarray.Dataset> Size: 56B
         Dimensions:  (y: 2)
         Coordinates:
-            x        (y) <U1 8B 'a' 'b'
           * y        (y) int64 16B 0 1
+            x        (y) <U1 8B 'a' 'b'
         Data variables:
             a        (y) int64 16B 5 7
             b        (y) float64 16B 0.1 2.4
@@ -4315,7 +4416,7 @@ class Dataset(
                     variables[current_name] = var
                 else:
                     index, index_vars = create_default_index_implicit(var)
-                    indexes.update({name: index for name in index_vars})
+                    indexes.update(dict.fromkeys(index_vars, index))
                     variables.update(index_vars)
                     coord_names.update(index_vars)
             else:
@@ -4327,8 +4428,8 @@ class Dataset(
 
     def expand_dims(
         self,
-        dim: None | Hashable | Sequence[Hashable] | Mapping[Any, Any] = None,
-        axis: None | int | Sequence[int] = None,
+        dim: Hashable | Sequence[Hashable] | Mapping[Any, Any] | None = None,
+        axis: int | Sequence[int] | None = None,
         create_index_for_new_dim: bool = True,
         **dim_kwargs: Any,
     ) -> Self:
@@ -4474,7 +4575,7 @@ class Dataset(
         elif isinstance(dim, Sequence):
             if len(dim) != len(set(dim)):
                 raise ValueError("dims should not contain duplicate values.")
-            dim = {d: 1 for d in dim}
+            dim = dict.fromkeys(dim, 1)
 
         dim = either_dict_or_kwargs(dim, dim_kwargs, "expand_dims")
         assert isinstance(dim, MutableMapping)
@@ -4544,26 +4645,25 @@ class Dataset(
                     for d, c in zip_axis_dim:
                         all_dims.insert(d, c)
                     variables[k] = v.set_dims(dict(all_dims))
-            else:
-                if k not in variables:
-                    if k in coord_names and create_index_for_new_dim:
-                        # If dims includes a label of a non-dimension coordinate,
-                        # it will be promoted to a 1D coordinate with a single value.
-                        index, index_vars = create_default_index_implicit(v.set_dims(k))
-                        indexes[k] = index
-                        variables.update(index_vars)
-                    else:
-                        if create_index_for_new_dim:
-                            warnings.warn(
-                                f"No index created for dimension {k} because variable {k} is not a coordinate. "
-                                f"To create an index for {k}, please first call `.set_coords('{k}')` on this object.",
-                                UserWarning,
-                                stacklevel=2,
-                            )
+            elif k not in variables:
+                if k in coord_names and create_index_for_new_dim:
+                    # If dims includes a label of a non-dimension coordinate,
+                    # it will be promoted to a 1D coordinate with a single value.
+                    index, index_vars = create_default_index_implicit(v.set_dims(k))
+                    indexes[k] = index
+                    variables.update(index_vars)
+                else:
+                    if create_index_for_new_dim:
+                        warnings.warn(
+                            f"No index created for dimension {k} because variable {k} is not a coordinate. "
+                            f"To create an index for {k}, please first call `.set_coords('{k}')` on this object.",
+                            UserWarning,
+                            stacklevel=2,
+                        )
 
-                        # create 1D variable without creating a new index
-                        new_1d_var = v.set_dims(k)
-                        variables.update({k: new_1d_var})
+                    # create 1D variable without creating a new index
+                    new_1d_var = v.set_dims(k)
+                    variables.update({k: new_1d_var})
 
         return self._replace_with_new_dims(
             variables, coord_names=coord_names, indexes=indexes
@@ -4700,7 +4800,7 @@ class Dataset(
                 for n in idx.index.names:
                     replace_dims[n] = dim
 
-            new_indexes.update({k: idx for k in idx_vars})
+            new_indexes.update(dict.fromkeys(idx_vars, idx))
             new_variables.update(idx_vars)
 
         # re-add deindexed coordinates (convert to base variables)
@@ -4816,7 +4916,7 @@ class Dataset(
                     # instead replace it by a new (multi-)index with dropped level(s)
                     idx = index.keep_levels(keep_level_vars)
                     idx_vars = idx.create_variables(keep_level_vars)
-                    new_indexes.update({k: idx for k in idx_vars})
+                    new_indexes.update(dict.fromkeys(idx_vars, idx))
                     new_variables.update(idx_vars)
                     if not isinstance(idx, PandasMultiIndex):
                         # multi-index reduced to single index
@@ -4882,9 +4982,8 @@ class Dataset(
                 index_cls = PandasIndex
             else:
                 index_cls = PandasMultiIndex
-        else:
-            if not issubclass(index_cls, Index):
-                raise TypeError(f"{index_cls} is not a subclass of xarray.Index")
+        elif not issubclass(index_cls, Index):
+            raise TypeError(f"{index_cls} is not a subclass of xarray.Index")
 
         invalid_coords = set(coord_names) - self._coord_names
 
@@ -4921,6 +5020,20 @@ class Dataset(
         if isinstance(index, PandasMultiIndex):
             coord_names = [index.dim] + list(coord_names)
 
+        # Check for extra variables that don't match the coordinate names
+        extra_vars = set(new_coord_vars) - set(coord_names)
+        if extra_vars:
+            extra_vars_str = ", ".join(f"'{name}'" for name in extra_vars)
+            coord_names_str = ", ".join(f"'{name}'" for name in coord_names)
+            raise ValueError(
+                f"The index created extra variables {extra_vars_str} that are not "
+                f"in the list of coordinates {coord_names_str}. "
+                f"Use a factory method pattern instead:\n"
+                f"  index = {index_cls.__name__}.from_variables(ds, {list(coord_names)!r})\n"
+                f"  coords = xr.Coordinates.from_xindex(index)\n"
+                f"  ds = ds.assign_coords(coords)"
+            )
+
         variables: dict[Hashable, Variable]
         indexes: dict[Hashable, Index]
 
@@ -4996,7 +5109,7 @@ class Dataset(
             level_vars = {k: self._variables[k] for k in order}
             idx = index.reorder_levels(level_vars)
             idx_vars = idx.create_variables(level_vars)
-            new_indexes.update({k: idx for k in idx_vars})
+            new_indexes.update(dict.fromkeys(idx_vars, idx))
             new_variables.update(idx_vars)
 
         indexes = {k: v for k, v in self._indexes.items() if k not in new_indexes}
@@ -5104,7 +5217,7 @@ class Dataset(
             if len(product_vars) == len(dims):
                 idx = index_cls.stack(product_vars, new_dim)
                 new_indexes[new_dim] = idx
-                new_indexes.update({k: idx for k in product_vars})
+                new_indexes.update(dict.fromkeys(product_vars, idx))
                 idx_vars = idx.create_variables(product_vars)
                 # keep consistent multi-index coordinate order
                 for k in idx_vars:
@@ -5246,7 +5359,13 @@ class Dataset(
         """
         from xarray.structure.concat import concat
 
-        stacking_dims = tuple(dim for dim in self.dims if dim not in sample_dims)
+        # add stacking dims by order of appearance
+        stacking_dims_list: list[Hashable] = []
+        for da in self.data_vars.values():
+            for dim in da.dims:
+                if dim not in sample_dims and dim not in stacking_dims_list:
+                    stacking_dims_list.append(dim)
+        stacking_dims = tuple(stacking_dims_list)
 
         for key, da in self.data_vars.items():
             missing_sample_dims = set(sample_dims) - set(da.dims)
@@ -5273,7 +5392,14 @@ class Dataset(
 
         # concatenate the arrays
         stackable_vars = [stack_dataarray(da) for da in self.data_vars.values()]
-        data_array = concat(stackable_vars, dim=new_dim)
+        data_array = concat(
+            stackable_vars,
+            dim=new_dim,
+            data_vars="all",
+            coords="different",
+            compat="equals",
+            join="outer",
+        )
 
         if name is not None:
             data_array.name = name
@@ -5351,7 +5477,7 @@ class Dataset(
             # TODO: we may depreciate implicit re-indexing with a pandas.MultiIndex
             xr_full_idx = PandasMultiIndex(full_idx, dim)
             indexers = Indexes(
-                {k: xr_full_idx for k in index_vars},
+                dict.fromkeys(index_vars, xr_full_idx),
                 xr_full_idx.create_variables(index_vars),
             )
             obj = self._reindex(
@@ -5474,7 +5600,7 @@ class Dataset(
                 result = result._unstack_once(d, stacked_indexes[d], fill_value, sparse)
         return result
 
-    def update(self, other: CoercibleMapping) -> Self:
+    def update(self, other: CoercibleMapping) -> None:
         """Update this dataset's variables with those from another dataset.
 
         Just like :py:meth:`dict.update` this is an in-place operation.
@@ -5491,14 +5617,6 @@ class Dataset(
             - mapping {var name: (dimension name, array-like)}
             - mapping {var name: (tuple of dimension names, array-like)}
 
-        Returns
-        -------
-        updated : Dataset
-            Updated dataset. Note that since the update is in-place this is the input
-            dataset.
-
-            It is deprecated since version 0.17 and scheduled to be removed in 0.21.
-
         Raises
         ------
         ValueError
@@ -5511,14 +5629,14 @@ class Dataset(
         Dataset.merge
         """
         merge_result = dataset_update_method(self, other)
-        return self._replace(inplace=True, **merge_result._asdict())
+        self._replace(inplace=True, **merge_result._asdict())
 
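
With the deprecated return value removed, ``Dataset.update`` behaves like ``dict.update``: it mutates the dataset in place and returns ``None``, so the result must not be rebound. For example:

import xarray as xr

ds = xr.Dataset({"a": ("x", [1, 2])})
ds.update({"b": ("x", [3, 4])})  # in-place; returns None
assert "b" in ds
# the previously deprecated chained style would now rebind the name to None:
# ds = ds.update({"c": ("x", [5, 6])})
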
     def merge(
         self,
         other: CoercibleMapping | DataArray,
         overwrite_vars: Hashable | Iterable[Hashable] = frozenset(),
-        compat: CompatOptions = "no_conflicts",
-        join: JoinOptions = "outer",
+        compat: CompatOptions | CombineKwargDefault = _COMPAT_DEFAULT,
+        join: JoinOptions | CombineKwargDefault = _JOIN_DEFAULT,
         fill_value: Any = xrdtypes.NA,
         combine_attrs: CombineAttrsOptions = "override",
     ) -> Self:
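
Swapping the plain string defaults for ``CombineKwargDefault`` sentinels lets ``merge`` tell apart callers that relied on the default from callers that passed a value explicitly, which is the usual prerequisite for changing a default through a deprecation cycle. The sentinel class itself is not shown in this hunk; a generic sketch of the idea, with illustrative names only:

import warnings

class _KwargDefault:
    """Marker for an argument the caller did not supply explicitly."""

    def __init__(self, name: str, value: str) -> None:
        self.name = name
        self.value = value

_JOIN_DEFAULT = _KwargDefault("join", "outer")

def merge(other, join=_JOIN_DEFAULT):
    if isinstance(join, _KwargDefault):
        # only callers relying on the default would see a future-change warning
        warnings.warn(f"the default of {join.name!r} may change", FutureWarning)
        join = join.value
    ...  # real merge logic would go here
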
@@ -5768,11 +5886,10 @@ class Dataset(
                 other_names.update(idx_other_names)
         if other_names:
             names_set |= set(other_names)
-            warnings.warn(
+            emit_user_level_warning(
                 f"Deleting a single level of a MultiIndex is deprecated. Previously, this deleted all levels of a MultiIndex. "
                 f"Please also drop the following variables: {other_names!r} to avoid an error in the future.",
                 DeprecationWarning,
-                stacklevel=2,
             )
 
         assert_no_index_corrupted(self.xindexes, names_set)
@@ -6730,34 +6847,32 @@ class Dataset(
             if name in self.coords:
                 if not reduce_dims:
                     variables[name] = var
-            else:
-                if (
-                    # Some reduction functions (e.g. std, var) need to run on variables
-                    # that don't have the reduce dims: PR5393
-                    not is_extension_array_dtype(var.dtype)
-                    and (
-                        not reduce_dims
-                        or not numeric_only
-                        or np.issubdtype(var.dtype, np.number)
-                        or (var.dtype == np.bool_)
-                    )
-                ):
-                    # prefer to aggregate over axis=None rather than
-                    # axis=(0, 1) if they will be equivalent, because
-                    # the former is often more efficient
-                    # keep single-element dims as list, to support Hashables
-                    reduce_maybe_single = (
-                        None
-                        if len(reduce_dims) == var.ndim and var.ndim != 1
-                        else reduce_dims
-                    )
-                    variables[name] = var.reduce(
-                        func,
-                        dim=reduce_maybe_single,
-                        keep_attrs=keep_attrs,
-                        keepdims=keepdims,
-                        **kwargs,
-                    )
+            elif (
+                # Some reduction functions (e.g. std, var) need to run on variables
+                # that don't have the reduce dims: PR5393
+                not pd.api.types.is_extension_array_dtype(var.dtype)  # noqa: TID251
+                and (
+                    not reduce_dims
+                    or not numeric_only
+                    or _is_numeric_aggregatable_dtype(var)
+                )
+            ):
+                # prefer to aggregate over axis=None rather than
+                # axis=(0, 1) if they will be equivalent, because
+                # the former is often more efficient
+                # keep single-element dims as list, to support Hashables
+                reduce_maybe_single = (
+                    None
+                    if len(reduce_dims) == var.ndim and var.ndim != 1
+                    else reduce_dims
+                )
+                variables[name] = var.reduce(
+                    func,
+                    dim=reduce_maybe_single,
+                    keep_attrs=keep_attrs,
+                    keepdims=keepdims,
+                    **kwargs,
+                )
 
         coord_names = {k for k in self.coords if k in variables}
         indexes = {k: v for k, v in self._indexes.items() if k in variables}
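
The dtype test that used to be inlined here is now delegated to ``_is_numeric_aggregatable_dtype``. Its definition is not part of this hunk; judging from the branch it replaces, a plausible sketch is simply:

import numpy as np

def _is_numeric_aggregatable_dtype(var) -> bool:
    # assumed to mirror the condition removed above
    return np.issubdtype(var.dtype, np.number) or var.dtype == np.bool_
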
@@ -6820,11 +6935,22 @@ class Dataset(
             k: maybe_wrap_array(v, func(v, *args, **kwargs))
             for k, v in self.data_vars.items()
         }
+        coord_vars, indexes = merge_coordinates_without_align(
+            [v.coords for v in variables.values()]
+        )
+        coords = Coordinates._construct_direct(coords=coord_vars, indexes=indexes)
+
         if keep_attrs:
             for k, v in variables.items():
                 v._copy_attrs_from(self.data_vars[k])
+
+            for k, v in coords.items():
+                if k not in self.coords:
+                    continue
+                v._copy_attrs_from(self.coords[k])
+
         attrs = self.attrs if keep_attrs else None
-        return type(self)(variables, attrs=attrs)
+        return type(self)(variables, coords=coords, attrs=attrs)
 
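
``Dataset.map`` now merges the coordinates (and their indexes) of the mapped results back into the returned dataset explicitly, rather than leaving it to the ``Dataset`` constructor, and copies coordinate attributes when ``keep_attrs`` is set. Basic usage is unchanged:

import numpy as np
import xarray as xr

ds = xr.Dataset({"a": ("x", [-1.0, 2.0])}, coords={"x": [10, 20]})
mapped = ds.map(np.fabs)
assert "x" in mapped.coords  # coordinates survive the mapping
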
     def apply(
         self,
@@ -7053,16 +7179,31 @@ class Dataset(
         )
 
     def _to_dataframe(self, ordered_dims: Mapping[Any, int]):
-        columns_in_order = [k for k in self.variables if k not in self.dims]
+        from xarray.core.extension_array import PandasExtensionArray
+
+        # All and only non-index arrays (whether data or coordinates) should
+        # become columns in the output DataFrame. Excluding indexes rather
+        # than dims handles the case of a MultiIndex along a single dimension.
+        columns_in_order = [k for k in self.variables if k not in self.xindexes]
         non_extension_array_columns = [
             k
             for k in columns_in_order
-            if not is_extension_array_dtype(self.variables[k].data)
+            if not pd.api.types.is_extension_array_dtype(self.variables[k].data)  # noqa: TID251
         ]
         extension_array_columns = [
             k
             for k in columns_in_order
-            if is_extension_array_dtype(self.variables[k].data)
+            if pd.api.types.is_extension_array_dtype(self.variables[k].data)  # noqa: TID251
+        ]
+        extension_array_columns_different_index = [
+            k
+            for k in extension_array_columns
+            if set(self.variables[k].dims) != set(ordered_dims.keys())
+        ]
+        extension_array_columns_same_index = [
+            k
+            for k in extension_array_columns
+            if k not in extension_array_columns_different_index
         ]
         data = [
             self._variables[k].set_dims(ordered_dims).values.reshape(-1)
@@ -7070,14 +7211,25 @@ class Dataset(
         ]
         index = self.coords.to_index([*ordered_dims])
         broadcasted_df = pd.DataFrame(
-            dict(zip(non_extension_array_columns, data, strict=True)), index=index
-        )
-        for extension_array_column in extension_array_columns:
-            extension_array = self.variables[extension_array_column].data.array
-            index = self[self.variables[extension_array_column].dims[0]].data
+            {
+                **dict(zip(non_extension_array_columns, data, strict=True)),
+                **{
+                    c: self.variables[c].data
+                    for c in extension_array_columns_same_index
+                },
+            },
+            index=index,
+        )
+        for extension_array_column in extension_array_columns_different_index:
+            extension_array = self.variables[extension_array_column].data
+            index = self[
+                self.variables[extension_array_column].dims[0]
+            ].coords.to_index()
             extension_array_df = pd.DataFrame(
                 {extension_array_column: extension_array},
-                index=self[self.variables[extension_array_column].dims[0]].data,
+                index=pd.Index(index.array)
+                if isinstance(index, PandasExtensionArray)  # type: ignore[redundant-expr]
+                else index,
             )
             extension_array_df.index.name = self.variables[extension_array_column].dims[
                 0
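
The rewritten ``_to_dataframe`` keys column selection off ``xindexes`` rather than ``dims``, so exactly the non-index variables become columns (which also covers a MultiIndex along a single dimension), and extension arrays that already share the full index are passed through without broadcasting. The user-facing expectation, as a small check:

import xarray as xr

ds = xr.Dataset(
    {"data": ("x", [1, 2, 3])},
    coords={"x": [10, 20, 30], "label": ("x", ["a", "b", "c"])},
)
df = ds.to_dataframe()
# the index coordinate "x" becomes the DataFrame index; every other
# variable, including the non-index coordinate "label", becomes a column
assert set(df.columns) == {"data", "label"}
assert df.index.name == "x"
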
@@ -7233,8 +7385,8 @@ class Dataset(
         arrays = []
         extension_arrays = []
         for k, v in dataframe.items():
-            if not is_extension_array_dtype(v) or isinstance(
-                v.array, pd.arrays.DatetimeArray | pd.arrays.TimedeltaArray
+            if not is_allowed_extension_array(v) or isinstance(
+                v.array, UNSUPPORTED_EXTENSION_ARRAY_TYPES
             ):
                 arrays.append((k, np.asarray(v)))
             else:
@@ -7245,7 +7397,7 @@ class Dataset(
 
         if isinstance(idx, pd.MultiIndex):
             dims = tuple(
-                name if name is not None else f"level_{n}"  # type: ignore[redundant-expr]
+                name if name is not None else f"level_{n}"  # type: ignore[redundant-expr,unused-ignore]
                 for n, name in enumerate(idx.names)
             )
             for dim, lev in zip(dims, idx.levels, strict=True):
@@ -7933,8 +8085,6 @@ class Dataset(
             variables = variables(self)
         if not isinstance(variables, list):
             variables = [variables]
-        else:
-            variables = variables
         arrays = [v if isinstance(v, DataArray) else self[v] for v in variables]
         aligned_vars = align(self, *arrays, join="left")
         aligned_self = cast("Self", aligned_vars[0])
@@ -8058,8 +8208,8 @@ class Dataset(
         <xarray.Dataset> Size: 152B
         Dimensions:   (quantile: 3, y: 4)
         Coordinates:
-          * y         (y) float64 32B 1.0 1.5 2.0 2.5
           * quantile  (quantile) float64 24B 0.0 0.5 1.0
+          * y         (y) float64 32B 1.0 1.5 2.0 2.5
         Data variables:
             a         (quantile, y) float64 96B 0.7 4.2 2.6 1.5 3.6 ... 6.5 7.3 9.4 1.9
 
@@ -8104,19 +8254,18 @@ class Dataset(
         for name, var in self.variables.items():
             reduce_dims = [d for d in var.dims if d in dims]
             if reduce_dims or not var.dims:
-                if name not in self.coords:
-                    if (
-                        not numeric_only
-                        or np.issubdtype(var.dtype, np.number)
-                        or var.dtype == np.bool_
-                    ):
-                        variables[name] = var.quantile(
-                            q,
-                            dim=reduce_dims,
-                            method=method,
-                            keep_attrs=keep_attrs,
-                            skipna=skipna,
-                        )
+                if name not in self.coords and (
+                    not numeric_only
+                    or np.issubdtype(var.dtype, np.number)
+                    or var.dtype == np.bool_
+                ):
+                    variables[name] = var.quantile(
+                        q,
+                        dim=reduce_dims,
+                        method=method,
+                        keep_attrs=keep_attrs,
+                        skipna=skipna,
+                    )
 
             else:
                 variables[name] = var
@@ -8210,7 +8359,7 @@ class Dataset(
             The coordinate to be used to compute the gradient.
         edge_order : {1, 2}, default: 1
             N-th order accurate differences at the boundaries.
-        datetime_unit : None or {"Y", "M", "W", "D", "h", "m", "s", "ms", \
+        datetime_unit : None or {"W", "D", "h", "m", "s", "ms", \
             "us", "ns", "ps", "fs", "as", None}, default: None
             Unit to compute gradient. Only valid for datetime coordinate.
 
@@ -8222,8 +8371,6 @@ class Dataset(
         --------
         numpy.gradient: corresponding numpy function
         """
-        from xarray.core.variable import Variable
-
         if coord not in self.variables and coord not in self.dims:
             variables_and_dims = tuple(set(self.variables.keys()).union(self.dims))
             raise ValueError(
@@ -8278,7 +8425,7 @@ class Dataset(
         ----------
         coord : hashable, or sequence of hashable
             Coordinate(s) used for the integration.
-        datetime_unit : {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', \
+        datetime_unit : {'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', \
                         'ps', 'fs', 'as', None}, optional
             Specify the unit if datetime coordinate is used.
 
@@ -8327,8 +8474,6 @@ class Dataset(
         return result
 
     def _integrate_one(self, coord, datetime_unit=None, cumulative=False):
-        from xarray.core.variable import Variable
-
         if coord not in self.variables and coord not in self.dims:
             variables_and_dims = tuple(set(self.variables.keys()).union(self.dims))
             raise ValueError(
@@ -8359,25 +8504,24 @@ class Dataset(
                 if dim not in v.dims or cumulative:
                     variables[k] = v
                     coord_names.add(k)
-            else:
-                if k in self.data_vars and dim in v.dims:
-                    coord_data = to_like_array(coord_var.data, like=v.data)
-                    if _contains_datetime_like_objects(v):
-                        v = datetime_to_numeric(v, datetime_unit=datetime_unit)
-                    if cumulative:
-                        integ = duck_array_ops.cumulative_trapezoid(
-                            v.data, coord_data, axis=v.get_axis_num(dim)
-                        )
-                        v_dims = v.dims
-                    else:
-                        integ = duck_array_ops.trapz(
-                            v.data, coord_data, axis=v.get_axis_num(dim)
-                        )
-                        v_dims = list(v.dims)
-                        v_dims.remove(dim)
-                    variables[k] = Variable(v_dims, integ)
+            elif k in self.data_vars and dim in v.dims:
+                coord_data = to_like_array(coord_var.data, like=v.data)
+                if _contains_datetime_like_objects(v):
+                    v = datetime_to_numeric(v, datetime_unit=datetime_unit)
+                if cumulative:
+                    integ = duck_array_ops.cumulative_trapezoid(
+                        v.data, coord_data, axis=v.get_axis_num(dim)
+                    )
+                    v_dims = v.dims
                 else:
-                    variables[k] = v
+                    integ = duck_array_ops.trapz(
+                        v.data, coord_data, axis=v.get_axis_num(dim)
+                    )
+                    v_dims = list(v.dims)
+                    v_dims.remove(dim)
+                variables[k] = Variable(v_dims, integ)
+            else:
+                variables[k] = v
         indexes = {k: v for k, v in self._indexes.items() if k in variables}
         return self._replace_with_new_dims(
             variables, coord_names=coord_names, indexes=indexes
@@ -8402,7 +8546,7 @@ class Dataset(
         ----------
         coord : hashable, or sequence of hashable
             Coordinate(s) used for the integration.
-        datetime_unit : {'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', \
+        datetime_unit : {'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns', \
                         'ps', 'fs', 'as', None}, optional
             Specify the unit if datetime coordinate is used.
 
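
Dropping 'Y' and 'M' from the accepted ``datetime_unit`` values reflects that years and months are not fixed-length offsets. For numeric coordinates nothing changes; a small usage sketch:

import xarray as xr

ds = xr.Dataset({"y": ("x", [1.0, 2.0, 4.0])}, coords={"x": [0.0, 1.0, 3.0]})
area = ds.integrate("x")               # trapezoidal rule along "x"
running = ds.cumulative_integrate("x")
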
@@ -8538,9 +8682,9 @@ class Dataset(
         <xarray.Dataset> Size: 192B
         Dimensions:         (x: 2, y: 2, time: 3)
         Coordinates:
+          * time            (time) datetime64[ns] 24B 2014-09-06 2014-09-07 2014-09-08
             lon             (x, y) float64 32B -99.83 -99.32 -99.79 -99.23
             lat             (x, y) float64 32B 42.25 42.21 42.63 42.59
-          * time            (time) datetime64[ns] 24B 2014-09-06 2014-09-07 2014-09-08
             reference_time  datetime64[ns] 8B 2014-09-05
         Dimensions without coordinates: x, y
         Data variables:
@@ -8553,9 +8697,9 @@ class Dataset(
         <xarray.Dataset> Size: 288B
         Dimensions:         (x: 2, y: 2, time: 3)
         Coordinates:
+          * time            (time) datetime64[ns] 24B 2014-09-06 2014-09-07 2014-09-08
             lon             (x, y) float64 32B -99.83 -99.32 -99.79 -99.23
             lat             (x, y) float64 32B 42.25 42.21 42.63 42.59
-          * time            (time) datetime64[ns] 24B 2014-09-06 2014-09-07 2014-09-08
             reference_time  datetime64[ns] 8B 2014-09-05
         Dimensions without coordinates: x, y
         Data variables:
@@ -9226,8 +9370,8 @@ class Dataset(
         <xarray.DataArray 'student' (test: 3)> Size: 84B
         array(['Bob', 'Bob', 'Alice'], dtype='<U7')
         Coordinates:
-            student  (test) <U7 84B 'Bob' 'Bob' 'Alice'
           * test     (test) <U6 72B 'Test 1' 'Test 2' 'Test 3'
+            student  (test) <U7 84B 'Bob' 'Bob' 'Alice'
 
         >>> min_score_in_english = dataset["student"].isel(
         ...     student=argmin_indices["english_scores"]
@@ -9236,8 +9380,8 @@ class Dataset(
         <xarray.DataArray 'student' (test: 3)> Size: 84B
         array(['Charlie', 'Bob', 'Charlie'], dtype='<U7')
         Coordinates:
-            student  (test) <U7 84B 'Charlie' 'Bob' 'Charlie'
           * test     (test) <U6 72B 'Test 1' 'Test 2' 'Test 3'
+            student  (test) <U7 84B 'Charlie' 'Bob' 'Charlie'
 
         See Also
         --------
@@ -9537,7 +9681,7 @@ class Dataset(
         """
         Curve fitting optimization for arbitrary functions.
 
-        Wraps `scipy.optimize.curve_fit` with `apply_ufunc`.
+        Wraps :py:func:`scipy.optimize.curve_fit` with :py:func:`~xarray.apply_ufunc`.
 
         Parameters
         ----------
@@ -9597,6 +9741,9 @@ class Dataset(
         --------
         Dataset.polyfit
         scipy.optimize.curve_fit
+        xarray.Dataset.xlm.modelfit
+            External method from `xarray-lmfit <https://xarray-lmfit.readthedocs.io/>`_
+            with more curve fitting functionality.
         """
         from xarray.computation.fit import curvefit as curvefit_impl
 
@@ -9661,7 +9808,7 @@ class Dataset(
         self,
         calendar: CFCalendar,
         dim: Hashable = "time",
-        align_on: Literal["date", "year", None] = None,
+        align_on: Literal["date", "year"] | None = None,
         missing: Any | None = None,
         use_cftime: bool | None = None,
     ) -> Self:
@@ -9818,7 +9965,7 @@ class Dataset(
         *,
         squeeze: Literal[False] = False,
         restore_coord_dims: bool = False,
-        eagerly_compute_group: bool = True,
+        eagerly_compute_group: Literal[False] | None = None,
         **groupers: Grouper,
     ) -> DatasetGroupBy:
         """Returns a DatasetGroupBy object for performing grouped operations.
@@ -9834,11 +9981,8 @@ class Dataset(
         restore_coord_dims : bool, default: False
             If True, also restore the dimension order of multi-dimensional
             coordinates.
-        eagerly_compute_group: bool
-            Whether to eagerly compute ``group`` when it is a chunked array.
-            This option is to maintain backwards compatibility. Set to False
-            to opt-in to future behaviour, where ``group`` is not automatically loaded
-            into memory.
+        eagerly_compute_group: False, optional
+            This argument is deprecated.
         **groupers : Mapping of str to Grouper or Resampler
             Mapping of variable name to group by to :py:class:`Grouper` or :py:class:`Resampler` object.
             One of ``group`` or ``groupers`` must be provided.
@@ -9861,7 +10005,7 @@ class Dataset(
 
         >>> ds.groupby("letters")
         <DatasetGroupBy, grouped over 1 grouper(s), 2 groups in total:
-            'letters': 2/2 groups present with labels 'a', 'b'>
+            'letters': UniqueGrouper('letters'), 2/2 groups with labels 'a', 'b'>
 
         Execute a reduction
 
@@ -9878,18 +10022,18 @@ class Dataset(
 
         >>> ds.groupby(["letters", "x"])
         <DatasetGroupBy, grouped over 2 grouper(s), 8 groups in total:
-            'letters': 2/2 groups present with labels 'a', 'b'
-            'x': 4/4 groups present with labels 10, 20, 30, 40>
+            'letters': UniqueGrouper('letters'), 2/2 groups with labels 'a', 'b'
+            'x': UniqueGrouper('x'), 4/4 groups with labels 10, 20, 30, 40>
 
         Use Grouper objects to express more complicated GroupBy operations
 
         >>> from xarray.groupers import BinGrouper, UniqueGrouper
         >>>
         >>> ds.groupby(x=BinGrouper(bins=[5, 15, 25]), letters=UniqueGrouper()).sum()
-        <xarray.Dataset> Size: 128B
+        <xarray.Dataset> Size: 144B
         Dimensions:  (y: 3, x_bins: 2, letters: 2)
         Coordinates:
-          * x_bins   (x_bins) object 16B (5, 15] (15, 25]
+          * x_bins   (x_bins) interval[int64, right] 32B (5, 15] (15, 25]
           * letters  (letters) object 16B 'a' 'b'
         Dimensions without coordinates: y
         Data variables:
@@ -9900,7 +10044,7 @@ class Dataset(
         :ref:`groupby`
             Users guide explanation of how to group and bin data.
 
-        :doc:`xarray-tutorial:intermediate/01-high-level-computation-patterns`
+        :doc:`xarray-tutorial:intermediate/computation/01-high-level-computation-patterns`
             Tutorial on :py:func:`~xarray.Dataset.Groupby` for windowed computation.
 
         :doc:`xarray-tutorial:fundamentals/03.2_groupby_with_xarray`
@@ -9939,7 +10083,7 @@ class Dataset(
         squeeze: Literal[False] = False,
         restore_coord_dims: bool = False,
         duplicates: Literal["raise", "drop"] = "raise",
-        eagerly_compute_group: bool = True,
+        eagerly_compute_group: Literal[False] | None = None,
     ) -> DatasetGroupBy:
         """Returns a DatasetGroupBy object for performing grouped operations.
 
@@ -9976,11 +10120,8 @@ class Dataset(
             coordinates.
         duplicates : {"raise", "drop"}, default: "raise"
             If bin edges are not unique, raise ValueError or drop non-uniques.
-        eagerly_compute_group: bool
-            Whether to eagerly compute ``group`` when it is a chunked array.
-            This option is to maintain backwards compatibility. Set to False
-            to opt-in to future behaviour, where ``group`` is not automatically loaded
-            into memory.
+        eagerly_compute_group: False, optional
+            This argument is deprecated.
 
         Returns
         -------
@@ -10046,13 +10187,13 @@ class Dataset(
 
         Returns
         -------
-        core.weighted.DatasetWeighted
+        computation.weighted.DatasetWeighted
 
         See Also
         --------
         :func:`DataArray.weighted <DataArray.weighted>`
 
-        :ref:`comput.weighted`
+        :ref:`compute.weighted`
             User guide on weighted array reduction using :py:func:`~xarray.Dataset.weighted`
 
         :doc:`xarray-tutorial:fundamentals/03.4_weighted`
@@ -10091,7 +10232,7 @@ class Dataset(
 
         Returns
         -------
-        core.rolling.DatasetRolling
+        computation.rolling.DatasetRolling
 
         See Also
         --------
@@ -10123,7 +10264,7 @@ class Dataset(
 
         Returns
         -------
-        core.rolling.DatasetRolling
+        computation.rolling.DatasetRolling
 
         See Also
         --------
@@ -10175,11 +10316,11 @@ class Dataset(
 
         Returns
         -------
-        core.rolling.DatasetCoarsen
+        computation.rolling.DatasetCoarsen
 
         See Also
         --------
-        :class:`core.rolling.DatasetCoarsen`
+        :class:`computation.rolling.DatasetCoarsen`
         :func:`DataArray.coarsen <DataArray.coarsen>`
 
         :ref:`reshape.coarsen`
diff -pruN 2025.03.1-8/xarray/core/datatree.py 2025.10.1-1/xarray/core/datatree.py
--- 2025.03.1-8/xarray/core/datatree.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/datatree.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,9 +1,10 @@
 from __future__ import annotations
 
 import functools
+import io
 import itertools
 import textwrap
-from collections import ChainMap
+from collections import ChainMap, defaultdict
 from collections.abc import (
     Callable,
     Hashable,
@@ -11,13 +12,17 @@ from collections.abc import (
     Iterator,
     Mapping,
 )
+from dataclasses import dataclass, field
 from html import escape
+from os import PathLike
 from typing import (
     TYPE_CHECKING,
     Any,
     Concatenate,
+    Literal,
     NoReturn,
     ParamSpec,
+    TypeAlias,
     TypeVar,
     Union,
     overload,
@@ -45,6 +50,7 @@ from xarray.core.formatting_html import
 )
 from xarray.core.indexes import Index, Indexes
 from xarray.core.options import OPTIONS as XR_OPTS
+from xarray.core.options import _get_keep_attrs
 from xarray.core.treenode import NamedNode, NodePath, zip_subtrees
 from xarray.core.types import Self
 from xarray.core.utils import (
@@ -72,13 +78,16 @@ except ImportError:
 if TYPE_CHECKING:
     import numpy as np
     import pandas as pd
+    from dask.delayed import Delayed
 
-    from xarray.core.datatree_io import T_DataTreeNetcdfEngine, T_DataTreeNetcdfTypes
+    from xarray.backends import ZarrStore
+    from xarray.backends.writers import T_DataTreeNetcdfEngine, T_DataTreeNetcdfTypes
     from xarray.core.types import (
         Dims,
         DtCompatible,
         ErrorOptions,
         ErrorOptionsWithWarn,
+        NestedDict,
         NetcdfWriteModes,
         T_ChunkDimFreq,
         T_ChunksFreq,
@@ -346,9 +355,9 @@ class DatasetView(Dataset):
         variables: dict[Hashable, Variable] | None = None,
         coord_names: set[Hashable] | None = None,
         dims: dict[Any, int] | None = None,
-        attrs: dict[Hashable, Any] | None | Default = _default,
+        attrs: dict[Hashable, Any] | Default | None = _default,
         indexes: dict[Hashable, Index] | None = None,
-        encoding: dict | None | Default = _default,
+        encoding: dict | Default | None = _default,
         inplace: bool = False,
     ) -> Dataset:
         """
@@ -421,17 +430,36 @@ class DatasetView(Dataset):
 
         # Copied from xarray.Dataset so as not to call type(self), which causes problems (see https://github.com/xarray-contrib/datatree/issues/188).
         # TODO Refactor xarray upstream to avoid needing to overwrite this.
-        # TODO This copied version will drop all attrs - the keep_attrs stuff should be re-instated
+        if keep_attrs is None:
+            keep_attrs = _get_keep_attrs(default=False)
         variables = {
             k: maybe_wrap_array(v, func(v, *args, **kwargs))
             for k, v in self.data_vars.items()
         }
+        if keep_attrs:
+            for k, v in variables.items():
+                v._copy_attrs_from(self.data_vars[k])
+        attrs = self.attrs if keep_attrs else None
         # return type(self)(variables, attrs=attrs)
-        return Dataset(variables)
+        return Dataset(variables, attrs=attrs)
+
+
+FromDictDataValue: TypeAlias = "CoercibleValue | Dataset | DataTree | None"
+
+
+@dataclass
+class _CoordWrapper:
+    value: CoercibleValue
+
+
+@dataclass
+class _DatasetArgs:
+    data_vars: dict[str, CoercibleValue] = field(default_factory=dict)
+    coords: dict[str, CoercibleValue] = field(default_factory=dict)
 
 
 class DataTree(
-    NamedNode["DataTree"],
+    NamedNode,
     DataTreeAggregations,
     DataTreeOpsMixin,
     TreeAttrAccessMixin,
@@ -551,9 +579,12 @@ class DataTree(
 
     @property
     def _coord_variables(self) -> ChainMap[Hashable, Variable]:
+        # ChainMap is incorrectly typed in typeshed (only the first argument
+        # needs to be mutable)
+        # https://github.com/python/typeshed/issues/8430
         return ChainMap(
             self._node_coord_variables,
-            *(p._node_coord_variables_with_index for p in self.parents),
+            *(p._node_coord_variables_with_index for p in self.parents),  # type: ignore[arg-type]
         )
 
     @property
@@ -804,7 +835,7 @@ class DataTree(
         return itertools.chain(self._data_variables, self._children)  # type: ignore[arg-type]
 
     def __array__(
-        self, dtype: np.typing.DTypeLike = None, /, *, copy: bool | None = None
+        self, dtype: np.typing.DTypeLike | None = None, /, *, copy: bool | None = None
     ) -> np.ndarray:
         raise TypeError(
             "cannot directly convert a DataTree into a "
@@ -1140,51 +1171,215 @@ class DataTree(
         result._replace_node(children=children_to_keep)
         return result
 
+    @overload
+    @classmethod
+    def from_dict(
+        cls,
+        data: Mapping[str, FromDictDataValue] | None = ...,
+        coords: Mapping[str, CoercibleValue] | None = ...,
+        *,
+        name: str | None = ...,
+        nested: Literal[False] = ...,
+    ) -> Self: ...
+
+    @overload
+    @classmethod
+    def from_dict(
+        cls,
+        data: (
+            Mapping[str, FromDictDataValue | NestedDict[FromDictDataValue]] | None
+        ) = ...,
+        coords: Mapping[str, CoercibleValue | NestedDict[CoercibleValue]] | None = ...,
+        *,
+        name: str | None = ...,
+        nested: Literal[True] = ...,
+    ) -> Self: ...
+
     @classmethod
     def from_dict(
         cls,
-        d: Mapping[str, Dataset | DataTree | None],
-        /,
+        data: (
+            Mapping[str, FromDictDataValue | NestedDict[FromDictDataValue]] | None
+        ) = None,
+        coords: Mapping[str, CoercibleValue | NestedDict[CoercibleValue]] | None = None,
+        *,
         name: str | None = None,
+        nested: bool = False,
     ) -> Self:
         """
         Create a datatree from a dictionary of data objects, organised by paths into the tree.
 
         Parameters
         ----------
-        d : dict-like
-            A mapping from path names to xarray.Dataset or DataTree objects.
-
-            Path names are to be given as unix-like path. If path names
-            containing more than one part are given, new tree nodes will be
-            constructed as necessary.
+        data : dict-like, optional
+            A mapping from path names to ``None`` (indicating an empty node),
+            ``DataTree``, ``Dataset``, objects coercible into a ``DataArray``, or
+            a nested dictionary of any of the above types.
+
+            Path names should be given as unix-like paths, either absolute
+            (/path/to/item) or relative to the root node (path/to/item). If path
+            names containing more than one part are given, new tree nodes will
+            be constructed automatically as necessary.
 
             To assign data to the root node of the tree use "", ".", "/" or "./"
             as the path.
+        coords : dict-like, optional
+            A mapping from path names to objects coercible into a DataArray, or
+            nested dictionaries of coercible objects.
         name : Hashable | None, optional
             Name for the root node of the tree. Default is None.
+        nested : bool, optional
+            If true, nested dictionaries in ``data`` and ``coords`` are
+            automatically flattened.
 
         Returns
         -------
         DataTree
 
+        See also
+        --------
+        Dataset
+
         Notes
         -----
-        If your dictionary is nested you will need to flatten it before using this method.
-        """
-        # Find any values corresponding to the root
-        d_cast = dict(d)
-        root_data = None
-        for key in ("", ".", "/", "./"):
-            if key in d_cast:
-                if root_data is not None:
+        ``DataTree.from_dict`` serves a conceptually different purpose from
+        ``Dataset.from_dict`` and ``DataArray.from_dict``. It converts a
+        hierarchy of Xarray objects into a DataTree, rather than converting pure
+        Python data structures.
+
+        Examples
+        --------
+
+        Construct a tree from a dict of Dataset objects:
+
+        >>> dt = DataTree.from_dict(
+        ...     {
+        ...         "/": Dataset(coords={"time": [1, 2, 3]}),
+        ...         "/ocean": Dataset(
+        ...             {
+        ...                 "temperature": ("time", [4, 5, 6]),
+        ...                 "salinity": ("time", [7, 8, 9]),
+        ...             }
+        ...         ),
+        ...         "/atmosphere": Dataset(
+        ...             {
+        ...                 "temperature": ("time", [2, 3, 4]),
+        ...                 "humidity": ("time", [3, 4, 5]),
+        ...             }
+        ...         ),
+        ...     }
+        ... )
+        >>> dt
+        <xarray.DataTree>
+        Group: /
+        │   Dimensions:  (time: 3)
+        │   Coordinates:
+        │     * time     (time) int64 24B 1 2 3
+        ├── Group: /ocean
+        │       Dimensions:      (time: 3)
+        │       Data variables:
+        │           temperature  (time) int64 24B 4 5 6
+        │           salinity     (time) int64 24B 7 8 9
+        └── Group: /atmosphere
+                Dimensions:      (time: 3)
+                Data variables:
+                    temperature  (time) int64 24B 2 3 4
+                    humidity     (time) int64 24B 3 4 5
+
+        Or equivalently, use a dict of values that can be converted into
+        `DataArray` objects, with syntax similar to the Dataset constructor:
+
+        >>> dt2 = DataTree.from_dict(
+        ...     data={
+        ...         "/ocean/temperature": ("time", [4, 5, 6]),
+        ...         "/ocean/salinity": ("time", [7, 8, 9]),
+        ...         "/atmosphere/temperature": ("time", [2, 3, 4]),
+        ...         "/atmosphere/humidity": ("time", [3, 4, 5]),
+        ...     },
+        ...     coords={"/time": [1, 2, 3]},
+        ... )
+        >>> assert dt.identical(dt2)
+
+        Nested dictionaries are automatically flattened if ``nested=True``:
+
+        >>> DataTree.from_dict({"a": {"b": {"c": {"x": 1, "y": 2}}}}, nested=True)
+        <xarray.DataTree>
+        Group: /
+        └── Group: /a
+            └── Group: /a/b
+                └── Group: /a/b/c
+                        Dimensions:  ()
+                        Data variables:
+                            x        int64 8B 1
+                            y        int64 8B 2
+
+        """
+        if data is None:
+            data = {}
+
+        if coords is None:
+            coords = {}
+
+        if nested:
+            data_items = utils.flat_items(data)
+            coords_items = utils.flat_items(coords)
+        else:
+            data_items = data.items()
+            coords_items = coords.items()
+            for arg_name, items in [("data", data_items), ("coords", coords_items)]:
+                for key, value in items:
+                    if isinstance(value, dict):
+                        raise TypeError(
+                            f"{arg_name} contains a dict value at {key=}, "
+                            "which is not a valid argument to "
+                            f"DataTree.from_dict() with nested=False: {value}"
+                        )
+
+        # Canonicalize and unify paths between `data` and `coords`
+        flat_data_and_coords = itertools.chain(
+            data_items,
+            ((k, _CoordWrapper(v)) for k, v in coords_items),
+        )
+        nodes: dict[NodePath, _CoordWrapper | FromDictDataValue] = {}
+        for key, value in flat_data_and_coords:
+            path = NodePath(key).absolute()
+            if path in nodes:
+                raise ValueError(
+                    f"multiple entries found corresponding to node {str(path)!r}"
+                )
+            nodes[path] = value
+
+        # Merge nodes corresponding to DataArrays into Datasets
+        dataset_args: defaultdict[NodePath, _DatasetArgs] = defaultdict(_DatasetArgs)
+        for path in list(nodes):
+            node = nodes[path]
+            if node is not None and not isinstance(node, Dataset | DataTree):
+                if path.parent == path:
+                    raise ValueError("cannot set DataArray value at root")
+                if path.parent in nodes:
                     raise ValueError(
-                        "multiple entries found corresponding to the root node"
+                        f"cannot set DataArray value at {str(path)!r} when "
+                        f"parent node at {str(path.parent)!r} is also set"
                     )
-                root_data = d_cast.pop(key)
+                del nodes[path]
+                if isinstance(node, _CoordWrapper):
+                    dataset_args[path.parent].coords[path.name] = node.value
+                else:
+                    dataset_args[path.parent].data_vars[path.name] = node
+        for path, args in dataset_args.items():
+            try:
+                nodes[path] = Dataset(args.data_vars, args.coords)
+            except (ValueError, TypeError) as e:
+                raise type(e)(
+                    "failed to construct xarray.Dataset for DataTree node at "
+                    f"{str(path)!r} with data_vars={args.data_vars} and "
+                    f"coords={args.coords}"
+                ) from e
 
         # Create the root node
-        if isinstance(root_data, DataTree):
+        root_data = nodes.pop(NodePath("/"), None)
+        if isinstance(root_data, cls):
+            # use cls so type-checkers understand this method returns Self
             obj = root_data.copy()
             obj.name = name
         elif root_data is None or isinstance(root_data, Dataset):
@@ -1195,21 +1390,21 @@ class DataTree(
                 f"or DataTree, got {type(root_data)}"
             )
 
-        def depth(item) -> int:
-            pathstr, _ = item
-            return len(NodePath(pathstr).parts)
+        def depth(item: tuple[NodePath, object]) -> int:
+            node_path, _ = item
+            return len(node_path.parts)
 
-        if d_cast:
-            # Populate tree with children determined from data_objects mapping
+        if nodes:
+            # Populate tree with children
             # Sort keys by depth so as to insert nodes from root first (see GH issue #9276)
-            for path, data in sorted(d_cast.items(), key=depth):
+            for path, node in sorted(nodes.items(), key=depth):
                 # Create and set new node
-                if isinstance(data, DataTree):
-                    new_node = data.copy()
-                elif isinstance(data, Dataset) or data is None:
-                    new_node = cls(dataset=data)
+                if isinstance(node, DataTree):
+                    new_node = node.copy()
+                elif isinstance(node, Dataset) or node is None:
+                    new_node = cls(dataset=node)
                 else:
-                    raise TypeError(f"invalid values: {data}")
+                    raise TypeError(f"invalid values: {node}")
                 obj._set_item(
                     path,
                     new_node,
@@ -1217,9 +1412,7 @@ class DataTree(
                     new_nodes_along_path=True,
                 )
 
-        # TODO: figure out why mypy is raising an error here, likely something
-        # to do with the return type of Dataset.copy()
-        return obj  # type: ignore[return-value]
+        return obj
 
     def to_dict(self, relative: bool = False) -> dict[str, Dataset]:
         """
@@ -1332,7 +1525,7 @@ class DataTree(
         )
 
     def _inherited_coords_set(self) -> set[str]:
-        return set(self.parent.coords if self.parent else [])
+        return set(self.parent.coords if self.parent else [])  # type: ignore[arg-type]
 
     def identical(self, other: DataTree) -> bool:
         """
@@ -1442,6 +1635,73 @@ class DataTree(
         other_keys = {key for key, _ in other.subtree_with_keys}
         return self.filter(lambda node: node.relative_to(self) in other_keys)
 
+    def prune(self, drop_size_zero_vars: bool = False) -> DataTree:
+        """
+        Remove empty nodes from the tree.
+
+        Returns a new tree containing only nodes that have data variables
+        (optionally only non-empty ones; see ``drop_size_zero_vars``).
+        Intermediate nodes are kept if they are required to support non-empty
+        children.
+
+        Parameters
+        ----------
+        drop_size_zero_vars : bool, default: False
+            If True, also considers variables with zero size as empty.
+            If False, keeps nodes with data variables even if they have zero size.
+
+        Returns
+        -------
+        DataTree
+            A new tree with empty nodes removed.
+
+        See Also
+        --------
+        filter
+
+        Examples
+        --------
+        >>> dt = xr.DataTree.from_dict(
+        ...     {
+        ...         "/a": xr.Dataset({"foo": ("x", [1, 2])}),
+        ...         "/b": xr.Dataset({"bar": ("x", [])}),
+        ...         "/c": xr.Dataset(),
+        ...     }
+        ... )
+        >>> dt.prune()  # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
+        <xarray.DataTree>
+        Group: /
+        ├── Group: /a
+        │       Dimensions:  (x: 2)
+        │       Dimensions without coordinates: x
+        │       Data variables:
+        │           foo      (x) int64 16B 1 2
+        └── Group: /b
+                Dimensions:  (x: 0)
+                Dimensions without coordinates: x
+                Data variables:
+                    bar      (x) float64 0B...
+
+        The ``drop_size_zero_vars`` parameter controls whether variables
+        with zero size are considered empty:
+
+        >>> dt.prune(drop_size_zero_vars=True)
+        <xarray.DataTree>
+        Group: /
+        └── Group: /a
+                Dimensions:  (x: 2)
+                Dimensions without coordinates: x
+                Data variables:
+                    foo      (x) int64 16B 1 2
+        """
+        non_empty_cond: Callable[[DataTree], bool]
+        if drop_size_zero_vars:
+            non_empty_cond = lambda node: len(node.data_vars) > 0 and any(
+                var.size > 0 for var in node.data_vars.values()
+            )
+        else:
+            non_empty_cond = lambda node: len(node.data_vars) > 0
+
+        return self.filter(non_empty_cond)
+
     def match(self, pattern: str) -> DataTree:
         """
         Return nodes with paths matching pattern.
@@ -1488,9 +1748,33 @@ class DataTree(
         }
         return DataTree.from_dict(matching_nodes, name=self.name)
 
+    @overload
     def map_over_datasets(
         self,
-        func: Callable,
+        func: Callable[..., Dataset | None],
+        *args: Any,
+        kwargs: Mapping[str, Any] | None = None,
+    ) -> DataTree: ...
+
+    @overload
+    def map_over_datasets(
+        self,
+        func: Callable[..., tuple[Dataset | None, Dataset | None]],
+        *args: Any,
+        kwargs: Mapping[str, Any] | None = None,
+    ) -> tuple[DataTree, DataTree]: ...
+
+    @overload
+    def map_over_datasets(
+        self,
+        func: Callable[..., tuple[Dataset | None, ...]],
+        *args: Any,
+        kwargs: Mapping[str, Any] | None = None,
+    ) -> tuple[DataTree, ...]: ...
+
+    def map_over_datasets(
+        self,
+        func: Callable[..., Dataset | None | tuple[Dataset | None, ...]],
         *args: Any,
         kwargs: Mapping[str, Any] | None = None,
     ) -> DataTree | tuple[DataTree, ...]:
@@ -1525,8 +1809,7 @@ class DataTree(
         map_over_datasets
         """
         # TODO this signature means that func has no way to know which node it is being called upon - change?
-        # TODO fix this typing error
-        return map_over_datasets(func, self, *args, kwargs=kwargs)
+        return map_over_datasets(func, self, *args, kwargs=kwargs)  # type: ignore[arg-type]
 
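
The new overloads let type checkers infer the return shape of ``map_over_datasets`` from the callback: a single ``Dataset`` yields one ``DataTree``, a 2-tuple yields a pair of trees, and other tuple lengths fall back to ``tuple[DataTree, ...]``. For instance:

import xarray as xr

dt = xr.DataTree.from_dict({"/a": xr.Dataset({"v": ("x", [1.0, 2.0])})})

def split(ds: xr.Dataset) -> tuple[xr.Dataset, xr.Dataset]:
    return ds + 1, ds - 1

plus_one, minus_one = dt.map_over_datasets(split)  # inferred as (DataTree, DataTree)
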
     @overload
     def pipe(
@@ -1620,10 +1903,9 @@ class DataTree(
 
     def _unary_op(self, f, *args, **kwargs) -> DataTree:
         # TODO do we need to any additional work to avoid duplication etc.? (Similar to aggregations)
-        return self.map_over_datasets(functools.partial(f, **kwargs), *args)  # type: ignore[return-value]
+        return self.map_over_datasets(functools.partial(f, **kwargs), *args)
 
     def _binary_op(self, other, f, reflexive=False, join=None) -> DataTree:
-        from xarray.core.dataset import Dataset
         from xarray.core.groupby import GroupBy
 
         if isinstance(other, GroupBy):
@@ -1654,9 +1936,11 @@ class DataTree(
     def __eq__(self, other: DtCompatible) -> Self:  # type: ignore[override]
         return super().__eq__(other)
 
+    # filepath=None writes to memory and returns a memoryview
+    @overload
     def to_netcdf(
         self,
-        filepath,
+        filepath: None = None,
         mode: NetcdfWriteModes = "w",
         encoding=None,
         unlimited_dims=None,
@@ -1666,14 +1950,63 @@ class DataTree(
         write_inherited_coords: bool = False,
         compute: bool = True,
         **kwargs,
-    ):
+    ) -> memoryview: ...
+
+    # compute=False returns dask.Delayed
+    @overload
+    def to_netcdf(
+        self,
+        filepath: str | PathLike | io.IOBase,
+        mode: NetcdfWriteModes = "w",
+        encoding=None,
+        unlimited_dims=None,
+        format: T_DataTreeNetcdfTypes | None = None,
+        engine: T_DataTreeNetcdfEngine | None = None,
+        group: str | None = None,
+        write_inherited_coords: bool = False,
+        *,
+        compute: Literal[False],
+        **kwargs,
+    ) -> Delayed: ...
+
+    # default returns None
+    @overload
+    def to_netcdf(
+        self,
+        filepath: str | PathLike | io.IOBase,
+        mode: NetcdfWriteModes = "w",
+        encoding=None,
+        unlimited_dims=None,
+        format: T_DataTreeNetcdfTypes | None = None,
+        engine: T_DataTreeNetcdfEngine | None = None,
+        group: str | None = None,
+        write_inherited_coords: bool = False,
+        compute: Literal[True] = True,
+        **kwargs,
+    ) -> None: ...
+
+    def to_netcdf(
+        self,
+        filepath: str | PathLike | io.IOBase | None = None,
+        mode: NetcdfWriteModes = "w",
+        encoding=None,
+        unlimited_dims=None,
+        format: T_DataTreeNetcdfTypes | None = None,
+        engine: T_DataTreeNetcdfEngine | None = None,
+        group: str | None = None,
+        write_inherited_coords: bool = False,
+        compute: bool = True,
+        **kwargs,
+    ) -> None | memoryview | Delayed:
         """
         Write datatree contents to a netCDF file.
 
         Parameters
         ----------
-        filepath : str or Path
-            Path to which to save this datatree.
+        filepath : str or PathLike or file-like object or None
+            Path to which to save this datatree, or a file-like object to write
+            it to (which must support read and write and be seekable), or None
+            to return the in-memory bytes as a ``memoryview``.
         mode : {"w", "a"}, default: "w"
             Write ('w') or append ('a') mode. If mode='w', any existing file at
             this location will be overwritten. If mode='a', existing variables
@@ -1695,8 +2028,9 @@ class DataTree(
             * NETCDF4: Data is stored in an HDF5 file, using netCDF4 API features.
         engine : {"netcdf4", "h5netcdf"}, optional
             Engine to use when writing netCDF files. If not provided, the
-            default engine is chosen based on available dependencies, with a
-            preference for "netcdf4" if writing to a file on disk.
+            default engine is chosen based on available dependencies,
+            preferring "h5netcdf" over "netcdf4" (customizable via
+            ``netcdf_engine_order`` in ``xarray.set_options()``).
         group : str, optional
             Path to the netCDF4 group in the given file to open as the root group
             of the ``DataTree``. Currently, specifying a group is not supported.
@@ -1708,18 +2042,23 @@ class DataTree(
         compute : bool, default: True
             If true compute immediately, otherwise return a
             ``dask.delayed.Delayed`` object that can be computed later.
-            Currently, ``compute=False`` is not supported.
         kwargs :
             Additional keyword arguments to be passed to ``xarray.Dataset.to_netcdf``
 
+        Returns
+        -------
+            * ``memoryview`` if ``filepath`` is None
+            * ``dask.delayed.Delayed`` if compute is False
+            * ``None`` otherwise
+
         Note
         ----
             Due to file format specifications the on-disk root group name
             is always ``"/"`` overriding any given ``DataTree`` root node name.
         """
-        from xarray.core.datatree_io import _datatree_to_netcdf
+        from xarray.backends.writers import _datatree_to_netcdf
 
-        _datatree_to_netcdf(
+        return _datatree_to_netcdf(
             self,
             filepath,
             mode=mode,
@@ -1733,6 +2072,35 @@ class DataTree(
             **kwargs,
         )
 
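
With the overloads above, the return type of ``DataTree.to_netcdf`` follows its arguments: omitting ``filepath`` returns the file contents in memory, ``compute=False`` returns a delayed object, and a plain file write returns ``None``. Assuming an h5netcdf or netcdf4 backend is installed:

import xarray as xr

dt = xr.DataTree.from_dict({"/group": xr.Dataset({"v": ("x", [1, 2, 3])})})

buf = dt.to_netcdf()       # no filepath: returns a memoryview of the file
dt.to_netcdf("tree.nc")    # write to disk: returns None
# delayed = dt.to_netcdf("tree.nc", compute=False)  # with dask: a Delayed object
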
+    # compute=False returns dask.Delayed
+    @overload
+    def to_zarr(
+        self,
+        store,
+        mode: ZarrWriteModes = "w-",
+        encoding=None,
+        consolidated: bool = True,
+        group: str | None = None,
+        write_inherited_coords: bool = False,
+        *,
+        compute: Literal[False],
+        **kwargs,
+    ) -> Delayed: ...
+
+    # default returns ZarrStore
+    @overload
+    def to_zarr(
+        self,
+        store,
+        mode: ZarrWriteModes = "w-",
+        encoding=None,
+        consolidated: bool = True,
+        group: str | None = None,
+        write_inherited_coords: bool = False,
+        compute: Literal[True] = True,
+        **kwargs,
+    ) -> ZarrStore: ...
+
     def to_zarr(
         self,
         store,
@@ -1743,7 +2111,7 @@ class DataTree(
         write_inherited_coords: bool = False,
         compute: bool = True,
         **kwargs,
-    ):
+    ) -> ZarrStore | Delayed:
         """
         Write datatree contents to a Zarr store.
 
@@ -1774,8 +2142,7 @@ class DataTree(
         compute : bool, default: True
             If true compute immediately, otherwise return a
             ``dask.delayed.Delayed`` object that can be computed later. Metadata
-            is always updated eagerly. Currently, ``compute=False`` is not
-            supported.
+            is always updated eagerly.
         kwargs :
             Additional keyword arguments to be passed to ``xarray.Dataset.to_zarr``
 
@@ -1784,9 +2151,9 @@ class DataTree(
             Due to file format specifications the on-disk root group name
             is always ``"/"`` overriding any given ``DataTree`` root node name.
         """
-        from xarray.core.datatree_io import _datatree_to_zarr
+        from xarray.backends.writers import _datatree_to_zarr
 
-        _datatree_to_zarr(
+        return _datatree_to_zarr(
             self,
             store,
             mode=mode,
@@ -1799,7 +2166,7 @@ class DataTree(
         )
 
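
``DataTree.to_zarr`` likewise now propagates the writer's return value: a ``ZarrStore`` by default, or a ``dask.delayed.Delayed`` when ``compute=False`` (metadata is still written eagerly). Sketch, assuming zarr (and dask for the delayed variant) is installed:

import xarray as xr

dt = xr.DataTree.from_dict({"/group": xr.Dataset({"v": ("x", [1, 2, 3])})})

store = dt.to_zarr("tree.zarr", mode="w")  # returns a ZarrStore
# delayed = dt.to_zarr("tree_lazy.zarr", mode="w", compute=False)  # dask Delayed
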
     def _get_all_dims(self) -> set:
-        all_dims = set()
+        all_dims: set[Any] = set()
         for node in self.subtree:
             all_dims.update(node._node_dims)
         return all_dims
diff -pruN 2025.03.1-8/xarray/core/datatree_io.py 2025.10.1-1/xarray/core/datatree_io.py
--- 2025.03.1-8/xarray/core/datatree_io.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/datatree_io.py	1970-01-01 00:00:00.000000000 +0000
@@ -1,139 +0,0 @@
-from __future__ import annotations
-
-from collections.abc import Mapping
-from os import PathLike
-from typing import TYPE_CHECKING, Any, Literal, get_args
-
-from xarray.core.datatree import DataTree
-from xarray.core.types import NetcdfWriteModes, ZarrWriteModes
-
-T_DataTreeNetcdfEngine = Literal["netcdf4", "h5netcdf"]
-T_DataTreeNetcdfTypes = Literal["NETCDF4"]
-
-if TYPE_CHECKING:
-    from xarray.core.types import ZarrStoreLike
-
-
-def _datatree_to_netcdf(
-    dt: DataTree,
-    filepath: str | PathLike,
-    mode: NetcdfWriteModes = "w",
-    encoding: Mapping[str, Any] | None = None,
-    unlimited_dims: Mapping | None = None,
-    format: T_DataTreeNetcdfTypes | None = None,
-    engine: T_DataTreeNetcdfEngine | None = None,
-    group: str | None = None,
-    write_inherited_coords: bool = False,
-    compute: bool = True,
-    **kwargs,
-) -> None:
-    """This function creates an appropriate datastore for writing a datatree to
-    disk as a netCDF file.
-
-    See `DataTree.to_netcdf` for full API docs.
-    """
-
-    if format not in [None, *get_args(T_DataTreeNetcdfTypes)]:
-        raise ValueError("to_netcdf only supports the NETCDF4 format")
-
-    if engine not in [None, *get_args(T_DataTreeNetcdfEngine)]:
-        raise ValueError("to_netcdf only supports the netcdf4 and h5netcdf engines")
-
-    if group is not None:
-        raise NotImplementedError(
-            "specifying a root group for the tree has not been implemented"
-        )
-
-    if not compute:
-        raise NotImplementedError("compute=False has not been implemented yet")
-
-    if encoding is None:
-        encoding = {}
-
-    # In the future, we may want to expand this check to insure all the provided encoding
-    # options are valid. For now, this simply checks that all provided encoding keys are
-    # groups in the datatree.
-    if set(encoding) - set(dt.groups):
-        raise ValueError(
-            f"unexpected encoding group name(s) provided: {set(encoding) - set(dt.groups)}"
-        )
-
-    if unlimited_dims is None:
-        unlimited_dims = {}
-
-    for node in dt.subtree:
-        at_root = node is dt
-        ds = node.to_dataset(inherit=write_inherited_coords or at_root)
-        group_path = None if at_root else "/" + node.relative_to(dt)
-        ds.to_netcdf(
-            filepath,
-            group=group_path,
-            mode=mode,
-            encoding=encoding.get(node.path),
-            unlimited_dims=unlimited_dims.get(node.path),
-            engine=engine,
-            format=format,
-            compute=compute,
-            **kwargs,
-        )
-        mode = "a"
-
-
-def _datatree_to_zarr(
-    dt: DataTree,
-    store: ZarrStoreLike,
-    mode: ZarrWriteModes = "w-",
-    encoding: Mapping[str, Any] | None = None,
-    consolidated: bool = True,
-    group: str | None = None,
-    write_inherited_coords: bool = False,
-    compute: bool = True,
-    **kwargs,
-):
-    """This function creates an appropriate datastore for writing a datatree
-    to a zarr store.
-
-    See `DataTree.to_zarr` for full API docs.
-    """
-
-    from zarr import consolidate_metadata
-
-    if group is not None:
-        raise NotImplementedError(
-            "specifying a root group for the tree has not been implemented"
-        )
-
-    if "append_dim" in kwargs:
-        raise NotImplementedError(
-            "specifying ``append_dim`` with ``DataTree.to_zarr`` has not been implemented"
-        )
-
-    if encoding is None:
-        encoding = {}
-
-    # In the future, we may want to expand this check to insure all the provided encoding
-    # options are valid. For now, this simply checks that all provided encoding keys are
-    # groups in the datatree.
-    if set(encoding) - set(dt.groups):
-        raise ValueError(
-            f"unexpected encoding group name(s) provided: {set(encoding) - set(dt.groups)}"
-        )
-
-    for node in dt.subtree:
-        at_root = node is dt
-        ds = node.to_dataset(inherit=write_inherited_coords or at_root)
-        group_path = None if at_root else "/" + node.relative_to(dt)
-        ds.to_zarr(
-            store,
-            group=group_path,
-            mode=mode,
-            encoding=encoding.get(node.path),
-            consolidated=False,
-            compute=compute,
-            **kwargs,
-        )
-        if "w" in mode:
-            mode = "a"
-
-    if consolidated:
-        consolidate_metadata(store)
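
The removed `_datatree_to_netcdf`/`_datatree_to_zarr` helpers above implement `DataTree.to_netcdf` and
`DataTree.to_zarr` (see their docstrings): they walk `dt.subtree` and write each node as a group relative
to the root. A minimal sketch of that public API, with illustrative tree contents and assuming a netCDF
backend such as netcdf4 or h5netcdf is installed:

    import xarray as xr

    # Root dataset plus one child group; each node is written as a netCDF group.
    tree = xr.DataTree.from_dict(
        {
            "/": xr.Dataset({"a": ("x", [1, 2, 3])}),
            "/child": xr.Dataset({"b": ("y", [10.0, 20.0])}),
        }
    )
    tree.to_netcdf("tree.nc")  # the removed writer only allowed the NETCDF4 format
    roundtripped = xr.open_datatree("tree.nc")
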
diff -pruN 2025.03.1-8/xarray/core/datatree_mapping.py 2025.10.1-1/xarray/core/datatree_mapping.py
--- 2025.03.1-8/xarray/core/datatree_mapping.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/datatree_mapping.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,6 +1,5 @@
 from __future__ import annotations
 
-import sys
 from collections.abc import Callable, Mapping
 from typing import TYPE_CHECKING, Any, cast, overload
 
@@ -14,15 +13,14 @@ if TYPE_CHECKING:
 
 @overload
 def map_over_datasets(
-    func: Callable[
-        ...,
-        Dataset | None,
-    ],
+    func: Callable[..., Dataset | None],
     *args: Any,
     kwargs: Mapping[str, Any] | None = None,
 ) -> DataTree: ...
 
 
+# add an explicit overload for the most common case of two return values
+# (python typing does not have a way to match tuple lengths in general)
 @overload
 def map_over_datasets(
     func: Callable[..., tuple[Dataset | None, Dataset | None]],
@@ -31,8 +29,6 @@ def map_over_datasets(
 ) -> tuple[DataTree, DataTree]: ...
 
 
-# add an expect overload for the most common case of two return values
-# (python typing does not have a way to match tuple lengths in general)
 @overload
 def map_over_datasets(
     func: Callable[..., tuple[Dataset | None, ...]],
@@ -106,7 +102,7 @@ def map_over_datasets(
     # Walk all trees simultaneously, applying func to all nodes that lie in same position in different trees
     # We don't know which arguments are DataTrees so we zip all arguments together as iterables
     # Store tuples of results in a dict because we don't yet know how many trees we need to rebuild to return
-    out_data_objects: dict[str, Dataset | None | tuple[Dataset | None, ...]] = {}
+    out_data_objects: dict[str, Dataset | tuple[Dataset | None, ...] | None] = {}
 
     tree_args = [arg for arg in args if isinstance(arg, DataTree)]
     name = result_name(tree_args)
@@ -162,16 +158,12 @@ def _handle_errors_with_path_context(pat
 
 
 def add_note(err: BaseException, msg: str) -> None:
-    # TODO: remove once python 3.10 can be dropped
-    if sys.version_info < (3, 11):
-        err.__notes__ = getattr(err, "__notes__", []) + [msg]  # type: ignore[attr-defined]
-    else:
-        err.add_note(msg)
+    err.add_note(msg)
 
 
 def _check_single_set_return_values(path_to_node: str, obj: Any) -> int | None:
     """Check types returned from single evaluation of func, and return number of return values received from func."""
-    if isinstance(obj, None | Dataset):
+    if isinstance(obj, Dataset | None):
         return None  # no need to pack results
 
     if not isinstance(obj, tuple) or not all(
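
The overloads above special-case a callable returning a 2-tuple of datasets, which maps to a 2-tuple of
trees. A small sketch of that case, with illustrative data not taken from the patch:

    import xarray as xr

    tree = xr.DataTree.from_dict({"/": xr.Dataset({"a": ("x", [1.0, 2.0])})})

    def split(ds):
        # Returning a tuple of two Datasets matches the dedicated two-return overload,
        # so map_over_datasets returns a tuple of two DataTrees.
        return ds + 1, ds - 1

    plus_one, minus_one = xr.map_over_datasets(split, tree)
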
diff -pruN 2025.03.1-8/xarray/core/datatree_render.py 2025.10.1-1/xarray/core/datatree_render.py
--- 2025.03.1-8/xarray/core/datatree_render.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/datatree_render.py	2025-10-10 10:38:05.000000000 +0000
@@ -8,14 +8,18 @@ type hints.
 
 from __future__ import annotations
 
-from collections import namedtuple
 from collections.abc import Iterable, Iterator
-from typing import TYPE_CHECKING
+from math import ceil
+from typing import TYPE_CHECKING, NamedTuple
 
 if TYPE_CHECKING:
     from xarray.core.datatree import DataTree
 
-Row = namedtuple("Row", ("pre", "fill", "node"))
+
+class Row(NamedTuple):
+    pre: str
+    fill: str
+    node: DataTree | str
 
 
 class AbstractStyle:
@@ -79,6 +83,7 @@ class RenderDataTree:
         style=None,
         childiter: type = list,
         maxlevel: int | None = None,
+        maxchildren: int | None = None,
     ):
         """
         Render tree starting at `node`.
@@ -88,6 +93,7 @@ class RenderDataTree:
                 Iterables that change the order of children  cannot be used
                 (e.g., `reversed`).
             maxlevel: Limit rendering to this depth.
+            maxchildren: Limit number of children at each node.
         :any:`RenderDataTree` is an iterator, returning a tuple with 3 items:
         `pre`
             tree prefix.
@@ -160,6 +166,16 @@ class RenderDataTree:
         root
         ├── sub0
         └── sub1
+
+        # `maxchildren` limits the number of children per node
+
+        >>> print(RenderDataTree(root, maxchildren=1).by_attr("name"))
+        root
+        ├── sub0
+        │   ├── sub0B
+        │   ...
+        ...
+
         """
         if style is None:
             style = ContStyle()
@@ -169,24 +185,44 @@ class RenderDataTree:
         self.style = style
         self.childiter = childiter
         self.maxlevel = maxlevel
+        self.maxchildren = maxchildren
 
     def __iter__(self) -> Iterator[Row]:
         return self.__next(self.node, tuple())
 
     def __next(
-        self, node: DataTree, continues: tuple[bool, ...], level: int = 0
+        self,
+        node: DataTree,
+        continues: tuple[bool, ...],
+        level: int = 0,
     ) -> Iterator[Row]:
         yield RenderDataTree.__item(node, continues, self.style)
         children = node.children.values()
         level += 1
         if children and (self.maxlevel is None or level < self.maxlevel):
+            nchildren = len(children)
             children = self.childiter(children)
-            for child, is_last in _is_last(children):
-                yield from self.__next(child, continues + (not is_last,), level=level)
+            for i, (child, is_last) in enumerate(_is_last(children)):
+                if (
+                    self.maxchildren is None
+                    or i < ceil(self.maxchildren / 2)
+                    or i >= ceil(nchildren - self.maxchildren / 2)
+                ):
+                    yield from self.__next(
+                        child,
+                        continues + (not is_last,),
+                        level=level,
+                    )
+                if (
+                    self.maxchildren is not None
+                    and nchildren > self.maxchildren
+                    and i == ceil(self.maxchildren / 2)
+                ):
+                    yield RenderDataTree.__item("...", continues, self.style)
 
     @staticmethod
     def __item(
-        node: DataTree, continues: tuple[bool, ...], style: AbstractStyle
+        node: DataTree | str, continues: tuple[bool, ...], style: AbstractStyle
     ) -> Row:
         if not continues:
             return Row("", "", node)
@@ -244,6 +280,9 @@ class RenderDataTree:
 
         def get() -> Iterator[str]:
             for pre, fill, node in self:
+                if isinstance(node, str):
+                    yield f"{fill}{node}"
+                    continue
                 attr = (
                     attrname(node)
                     if callable(attrname)
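
With the new `maxchildren` argument, the renderer keeps the first and last ceil(maxchildren / 2) children
of each node and emits a literal "..." row for the elided middle. A quick sketch using the internal
renderer; the example tree is illustrative:

    import xarray as xr
    from xarray.core.datatree_render import RenderDataTree

    tree = xr.DataTree.from_dict(
        {f"/child{i}": xr.Dataset() for i in range(6)}, name="root"
    )
    # With maxchildren=2 only the first and last child are shown,
    # separated by an elision row ("...").
    print(RenderDataTree(tree, maxchildren=2).by_attr("name"))
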
diff -pruN 2025.03.1-8/xarray/core/dtypes.py 2025.10.1-1/xarray/core/dtypes.py
--- 2025.03.1-8/xarray/core/dtypes.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/dtypes.py	2025-10-10 10:38:05.000000000 +0000
@@ -4,7 +4,7 @@ import functools
 from typing import Any
 
 import numpy as np
-from pandas.api.types import is_extension_array_dtype
+import pandas as pd
 
 from xarray.compat import array_api_compat, npcompat
 from xarray.compat.npcompat import HAS_STRING_DTYPE
@@ -213,7 +213,7 @@ def isdtype(dtype, kind: str | tuple[str
 
     if isinstance(dtype, np.dtype):
         return npcompat.isdtype(dtype, kind)
-    elif is_extension_array_dtype(dtype):
+    elif pd.api.types.is_extension_array_dtype(dtype):  # noqa: TID251
         # we never want to match pandas extension array dtypes
         return False
     else:
@@ -238,7 +238,7 @@ def preprocess_types(t):
 
 
 def result_type(
-    *arrays_and_dtypes: np.typing.ArrayLike | np.typing.DTypeLike,
+    *arrays_and_dtypes: np.typing.ArrayLike | np.typing.DTypeLike | None,
     xp=None,
 ) -> np.dtype:
     """Like np.result_type, but with type promotion rules matching pandas.
diff -pruN 2025.03.1-8/xarray/core/duck_array_ops.py 2025.10.1-1/xarray/core/duck_array_ops.py
--- 2025.03.1-8/xarray/core/duck_array_ops.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/duck_array_ops.py	2025-10-10 10:38:05.000000000 +0000
@@ -13,20 +13,21 @@ import warnings
 from collections.abc import Callable
 from functools import partial
 from importlib import import_module
+from typing import Any
 
 import numpy as np
 import pandas as pd
-from numpy import (  # noqa: F401
+from numpy import (
     isclose,
     isnat,
     take,
-    unravel_index,
+    unravel_index,  # noqa: F401
 )
-from pandas.api.types import is_extension_array_dtype
 
 from xarray.compat import dask_array_compat, dask_array_ops
 from xarray.compat.array_api_compat import get_array_namespace
 from xarray.core import dtypes, nputils
+from xarray.core.extension_array import PandasExtensionArray
 from xarray.core.options import OPTIONS
 from xarray.core.utils import is_duck_array, is_duck_dask_array, module_available
 from xarray.namedarray.parallelcompat import get_chunked_array_type
@@ -47,8 +48,6 @@ dask_available = module_available("dask"
 
 
 def einsum(*args, **kwargs):
-    from xarray.core.options import OPTIONS
-
     if OPTIONS["use_opt_einsum"] and module_available("opt_einsum"):
         import opt_einsum
 
@@ -143,6 +142,21 @@ def round(array):
 around: Callable = round
 
 
+def isna(data: Any) -> bool:
+    """Checks if data is literally np.nan or pd.NA.
+
+    Parameters
+    ----------
+    data
+        Any python object
+
+    Returns
+    -------
+        Whether or not the data is np.nan or pd.NA
+    """
+    return data is pd.NA or data is np.nan  # noqa: PLW0177
+
+
 def isnull(data):
     data = asarray(data)
 
@@ -168,16 +182,15 @@ def isnull(data):
         # bool_ is for backwards compat with numpy<2, and cupy
         dtype = xp.bool_ if hasattr(xp, "bool_") else xp.bool
         return full_like(data, dtype=dtype, fill_value=False)
-    else:
-        # at this point, array should have dtype=object
-        if isinstance(data, np.ndarray) or is_extension_array_dtype(data):
-            return pandas_isnull(data)
-        else:
-            # Not reachable yet, but intended for use with other duck array
-            # types. For full consistency with pandas, we should accept None as
-            # a null value as well as NaN, but it isn't clear how to do this
-            # with duck typing.
-            return data != data
+    # at this point, array should have dtype=object
+    elif isinstance(data, np.ndarray) or pd.api.types.is_extension_array_dtype(data):  # noqa: TID251
+        return pandas_isnull(data)
+    else:
+        # Not reachable yet, but intended for use with other duck array
+        # types. For full consistency with pandas, we should accept None as
+        # a null value as well as NaN, but it isn't clear how to do this
+        # with duck typing.
+        return data != data  # noqa: PLR0124
 
 
 def notnull(data):
@@ -251,18 +264,27 @@ def asarray(data, xp=np, dtype=None):
 
 
 def as_shared_dtype(scalars_or_arrays, xp=None):
-    """Cast a arrays to a shared dtype using xarray's type promotion rules."""
-    if any(is_extension_array_dtype(x) for x in scalars_or_arrays):
-        extension_array_types = [
-            x.dtype for x in scalars_or_arrays if is_extension_array_dtype(x)
-        ]
-        if len(extension_array_types) == len(scalars_or_arrays) and all(
+    """Cast arrays to a shared dtype using xarray's type promotion rules."""
+    extension_array_types = [
+        x.dtype
+        for x in scalars_or_arrays
+        if pd.api.types.is_extension_array_dtype(x)  # noqa: TID251
+    ]
+    if len(extension_array_types) >= 1:
+        non_nans = [x for x in scalars_or_arrays if not isna(x)]
+        if len(extension_array_types) == len(non_nans) and all(
             isinstance(x, type(extension_array_types[0])) for x in extension_array_types
         ):
-            return scalars_or_arrays
+            return [
+                x
+                if not isna(x)
+                else PandasExtensionArray(
+                    type(non_nans[0].array)._from_sequence([x], dtype=non_nans[0].dtype)
+                )
+                for x in scalars_or_arrays
+            ]
         raise ValueError(
-            "Cannot cast arrays to shared type, found"
-            f" array types {[x.dtype for x in scalars_or_arrays]}"
+            f"Cannot cast values to shared type, found values: {scalars_or_arrays}"
         )
 
     # Avoid calling array_type("cupy") repeatedly in the any check

@@ -658,9 +680,7 @@ def timedelta_to_numeric(value, datetime
         The output data type.
 
     """
-    import datetime as dt
-
-    if isinstance(value, dt.timedelta):
+    if isinstance(value, datetime.timedelta):
         out = py_timedelta_to_float(value, datetime_unit)
     elif isinstance(value, np.timedelta64):
         out = np_timedelta64_to_float(value, datetime_unit)
@@ -773,6 +793,12 @@ def _nd_cum_func(cum_func, array, axis,
     return out
 
 
+def ndim(array) -> int:
+    # `ndim` is a required part of the duck array protocol and the array API, but we fall back to np.ndim just in case; see
+    # https://docs.xarray.dev/en/latest/internals/duck-arrays-integration.html#duck-array-requirements
+    return array.ndim if hasattr(array, "ndim") else np.ndim(array)
+
+
 def cumprod(array, axis=None, **kwargs):
     """N-dimensional version of cumprod."""
     return _nd_cum_func(cumprod_1d, array, axis, **kwargs)
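
The new `isna` helper above is an identity check for the two scalar missing-value sentinels, distinct from
the element-wise `isnull`. A brief illustration; the imports target the internal module shown in this diff:

    import numpy as np
    import pandas as pd
    from xarray.core.duck_array_ops import isna, isnull

    assert isna(np.nan) and isna(pd.NA)
    assert not isna(None) and not isna(float("nan"))  # identity check: only the np.nan singleton matches
    print(isnull(np.array([1.0, np.nan])))  # element-wise: [False  True]
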
diff -pruN 2025.03.1-8/xarray/core/extension_array.py 2025.10.1-1/xarray/core/extension_array.py
--- 2025.03.1-8/xarray/core/extension_array.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/extension_array.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,13 +1,16 @@
 from __future__ import annotations
 
+import copy
 from collections.abc import Callable, Sequence
-from typing import Generic, cast
+from dataclasses import dataclass
+from typing import Any, Generic, cast
 
 import numpy as np
 import pandas as pd
-from pandas.api.types import is_extension_array_dtype
+from packaging.version import Version
 
 from xarray.core.types import DTypeLikeSave, T_ExtensionArray
+from xarray.core.utils import NDArrayMixin, is_allowed_extension_array
 
 HANDLED_EXTENSION_ARRAY_FUNCTIONS: dict[Callable, Callable] = {}
 
@@ -33,12 +36,12 @@ def __extension_duck_array__issubdtype(
 def __extension_duck_array__broadcast(arr: T_ExtensionArray, shape: tuple):
     if shape[0] == len(arr) and len(shape) == 1:
         return arr
-    raise NotImplementedError("Cannot broadcast 1d-only pandas categorical array.")
+    raise NotImplementedError("Cannot broadcast 1d-only pandas extension array.")
 
 
 @implements(np.stack)
 def __extension_duck_array__stack(arr: T_ExtensionArray, axis: int):
-    raise NotImplementedError("Cannot stack 1d-only pandas categorical array.")
+    raise NotImplementedError("Cannot stack 1d-only pandas extension array.")
 
 
 @implements(np.concatenate)
@@ -62,21 +65,46 @@ def __extension_duck_array__where(
     return cast(T_ExtensionArray, pd.Series(x).where(condition, pd.Series(y)).array)
 
 
-class PandasExtensionArray(Generic[T_ExtensionArray]):
-    array: T_ExtensionArray
+@implements(np.ndim)
+def __extension_duck_array__ndim(x: PandasExtensionArray) -> int:
+    return x.ndim
+
+
+@implements(np.reshape)
+def __extension_duck_array__reshape(
+    arr: T_ExtensionArray, shape: tuple
+) -> T_ExtensionArray:
+    if (shape[0] == len(arr) and len(shape) == 1) or shape == (-1,):
+        return arr
+    raise NotImplementedError(
+        f"Cannot reshape 1d-only pandas extension array to: {shape}"
+    )
 
-    def __init__(self, array: T_ExtensionArray):
-        """NEP-18 compliant wrapper for pandas extension arrays.
 
-        Parameters
-        ----------
-        array : T_ExtensionArray
-            The array to be wrapped upon e.g,. :py:class:`xarray.Variable` creation.
-        ```
-        """
-        if not isinstance(array, pd.api.extensions.ExtensionArray):
-            raise TypeError(f"{array} is not an pandas ExtensionArray.")
-        self.array = array
+@dataclass(frozen=True)
+class PandasExtensionArray(NDArrayMixin, Generic[T_ExtensionArray]):
+    """NEP-18 compliant wrapper for pandas extension arrays.
+
+    Parameters
+    ----------
+    array : T_ExtensionArray
+        The array to be wrapped upon, e.g., :py:class:`xarray.Variable` creation.
+    ```
+    """
+
+    array: T_ExtensionArray
+
+    def __post_init__(self):
+        if not isinstance(self.array, pd.api.extensions.ExtensionArray):
+            raise TypeError(f"{self.array} is not an pandas ExtensionArray.")
+        # This does not use the UNSUPPORTED_EXTENSION_ARRAY_TYPES whitelist because
+        # we do support some extension arrays, e.g. datetime-backed ones, that need
+        # duck array support internally via this class. These can appear, for example,
+        # when a `DatetimeIndex` wrapped by `PandasIndex` is used internally.
+        if not is_allowed_extension_array(self.array):
+            raise TypeError(
+                f"{self.array.dtype!r} should be converted to a numpy array in `xarray` internally."
+            )
 
     def __array_function__(self, func, types, args, kwargs):
         def replace_duck_with_extension_array(args) -> list:
@@ -96,28 +124,22 @@ class PandasExtensionArray(Generic[T_Ext
 
         args = tuple(replace_duck_with_extension_array(args))
         if func not in HANDLED_EXTENSION_ARRAY_FUNCTIONS:
-            return func(*args, **kwargs)
+            raise KeyError("Function not registered for pandas extension arrays.")
         res = HANDLED_EXTENSION_ARRAY_FUNCTIONS[func](*args, **kwargs)
-        if is_extension_array_dtype(res):
-            return type(self)[type(res)](res)
+        if is_allowed_extension_array(res):
+            return PandasExtensionArray(res)
         return res
 
-    def __array_ufunc__(ufunc, method, *inputs, **kwargs):
+    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
         return ufunc(*inputs, **kwargs)
 
-    def __repr__(self):
-        return f"PandasExtensionArray(array={self.array!r})"
-
-    def __getattr__(self, attr: str) -> object:
-        return getattr(self.array, attr)
-
     def __getitem__(self, key) -> PandasExtensionArray[T_ExtensionArray]:
         item = self.array[key]
-        if is_extension_array_dtype(item):
-            return type(self)(item)
-        if np.isscalar(item):
-            return type(self)(type(self.array)([item]))  # type: ignore[call-arg]  # only subclasses with proper __init__ allowed
-        return item
+        if is_allowed_extension_array(item):
+            return PandasExtensionArray(item)
+        if np.isscalar(item) or isinstance(key, int):
+            return PandasExtensionArray(type(self.array)._from_sequence([item]))  # type: ignore[call-arg,attr-defined,unused-ignore]
+        return PandasExtensionArray(item)
 
     def __setitem__(self, key, val):
         self.array[key] = val
@@ -132,3 +154,32 @@ class PandasExtensionArray(Generic[T_Ext
 
     def __len__(self):
         return len(self.array)
+
+    @property
+    def ndim(self) -> int:
+        return 1
+
+    def __array__(
+        self, dtype: np.typing.DTypeLike | None = None, /, *, copy: bool | None = None
+    ) -> np.ndarray:
+        if Version(np.__version__) >= Version("2.0.0"):
+            return np.asarray(self.array, dtype=dtype, copy=copy)
+        else:
+            return np.asarray(self.array, dtype=dtype)
+
+    def __getattr__(self, attr: str) -> Any:
+        # With __deepcopy__ or __copy__, the object is first constructed and then its sub-objects are attached (see https://docs.python.org/3/library/copy.html).
+        # Thus, without `super().__getattribute__("array")`, this method would call `self.array` (i.e., `getattr(self, "array")`) again while looking for `__setstate__`
+        # (apparently the first attribute sought by copy.copy on the under-construction copied object),
+        # causing a recursion error because `array` is not yet set on the object while it is being constructed during `__{deep}copy__`.
+        # Even though `__copy__` and `__deepcopy__` are now defined below due to `test_extension_array_copy_arrow_type` (cause unknown),
+        # we keep this approach as it is more robust than `self.array`.
+        return getattr(super().__getattribute__("array"), attr)
+
+    def __copy__(self) -> PandasExtensionArray[T_ExtensionArray]:
+        return PandasExtensionArray(copy.copy(self.array))
+
+    def __deepcopy__(
+        self, memo: dict[int, Any] | None = None
+    ) -> PandasExtensionArray[T_ExtensionArray]:
+        return PandasExtensionArray(copy.deepcopy(self.array, memo=memo))
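
With the frozen-dataclass rewrite above, `PandasExtensionArray` is constructed directly from a pandas
extension array and now supports `np.asarray`, copying, and scalar indexing via `_from_sequence`. A short
sketch of the internal API, assuming categorical dtypes remain among the supported extension arrays:

    import copy

    import numpy as np
    import pandas as pd
    from xarray.core.extension_array import PandasExtensionArray

    wrapped = PandasExtensionArray(pd.Categorical(["a", "b", "a"]))
    print(wrapped.ndim)  # always 1 for pandas extension arrays
    print(np.asarray(wrapped))  # __array__ densifies to a numpy array
    clone = copy.deepcopy(wrapped)  # __deepcopy__ copies the underlying array
    print(wrapped[0])  # integer indexing re-wraps the scalar via _from_sequence
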
diff -pruN 2025.03.1-8/xarray/core/formatting.py 2025.10.1-1/xarray/core/formatting.py
--- 2025.03.1-8/xarray/core/formatting.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/formatting.py	2025-10-10 10:38:05.000000000 +0000
@@ -19,7 +19,12 @@ from pandas.errors import OutOfBoundsDat
 
 from xarray.core.datatree_render import RenderDataTree
 from xarray.core.duck_array_ops import array_all, array_any, array_equiv, astype, ravel
-from xarray.core.indexing import MemoryCachedArray
+from xarray.core.extension_array import PandasExtensionArray
+from xarray.core.indexing import (
+    BasicIndexer,
+    ExplicitlyIndexed,
+    MemoryCachedArray,
+)
 from xarray.core.options import OPTIONS, _get_boolean_with_default
 from xarray.core.treenode import group_subtrees
 from xarray.core.utils import is_duck_array
@@ -86,6 +91,8 @@ def first_n_items(array, n_desired):
 
     if n_desired < array.size:
         indexer = _get_indexer_at_least_n_items(array.shape, n_desired, from_end=False)
+        if isinstance(array, ExplicitlyIndexed):
+            indexer = BasicIndexer(indexer)
         array = array[indexer]
 
     # We pass variable objects in to handle indexing
@@ -110,6 +117,8 @@ def last_n_items(array, n_desired):
 
     if n_desired < array.size:
         indexer = _get_indexer_at_least_n_items(array.shape, n_desired, from_end=True)
+        if isinstance(array, ExplicitlyIndexed):
+            indexer = BasicIndexer(indexer)
         array = array[indexer]
 
     # We pass variable objects in to handle indexing
@@ -176,6 +185,11 @@ def format_timedelta(t, timedelta_format
 
 def format_item(x, timedelta_format=None, quote_strings=True):
     """Returns a succinct summary of an object as a string"""
+    if isinstance(x, PandasExtensionArray):
+        # We want to bypass PandasExtensionArray's repr here
+        # because its __repr__ is PandasExtensionArray(array=[...])
+        # and this function is only for single elements.
+        return str(x.array[0])
     if isinstance(x, np.datetime64 | datetime):
         return format_timestamp(x)
     if isinstance(x, np.timedelta64 | timedelta):
@@ -184,7 +198,7 @@ def format_item(x, timedelta_format=None
         if hasattr(x, "dtype"):
             x = x.item()
         return repr(x) if quote_strings else x
-    elif hasattr(x, "dtype") and np.issubdtype(x.dtype, np.floating):
+    elif hasattr(x, "dtype") and np.issubdtype(x.dtype, np.floating) and x.shape == ():
         return f"{x.item():.4}"
     else:
         return str(x)
@@ -194,7 +208,9 @@ def format_items(x):
     """Returns a succinct summaries of all items in a sequence as strings"""
     x = to_duck_array(x)
     timedelta_format = "datetime"
-    if np.issubdtype(x.dtype, np.timedelta64):
+    if not isinstance(x, PandasExtensionArray) and np.issubdtype(
+        x.dtype, np.timedelta64
+    ):
         x = astype(x, dtype="timedelta64[ns]")
         day_part = x[~pd.isnull(x)].astype("timedelta64[D]").astype("timedelta64[ns]")
         time_needed = x[~pd.isnull(x)] != day_part
@@ -312,7 +328,7 @@ def inline_variable_array_repr(var, max_
 def summarize_variable(
     name: Hashable,
     var: Variable,
-    col_width: int,
+    col_width: int | None = None,
     max_width: int | None = None,
     is_index: bool = False,
 ):
@@ -327,20 +343,22 @@ def summarize_variable(
             max_width = max_width_options
 
     marker = "*" if is_index else " "
-    first_col = pretty_print(f"  {marker} {name} ", col_width)
+    first_col = f"  {marker} {name} "
+    if col_width is not None:
+        first_col = pretty_print(first_col, col_width)
 
     if variable.dims:
-        dims_str = "({}) ".format(", ".join(map(str, variable.dims)))
+        dims_str = ", ".join(map(str, variable.dims))
+        dims_str = f"({dims_str}) "
     else:
         dims_str = ""
 
-    nbytes_str = f" {render_human_readable_nbytes(variable.nbytes)}"
-    front_str = f"{first_col}{dims_str}{variable.dtype}{nbytes_str} "
+    front_str = f"{first_col}{dims_str}{variable.dtype} {render_human_readable_nbytes(variable.nbytes)} "
 
     values_width = max_width - len(front_str)
     values_str = inline_variable_array_repr(variable, values_width)
 
-    return front_str + values_str
+    return f"{front_str}{values_str}"
 
 
 def summarize_attr(key, value, col_width=None):
@@ -428,8 +446,12 @@ attrs_repr = functools.partial(
 def coords_repr(coords: AbstractCoordinates, col_width=None, max_rows=None):
     if col_width is None:
         col_width = _calculate_col_width(coords)
+    dims = tuple(coords._data.dims)
+    dim_ordered_coords = sorted(
+        coords.items(), key=lambda x: dims.index(x[0]) if x[0] in dims else len(dims)
+    )
     return _mapping_repr(
-        coords,
+        dict(dim_ordered_coords),
         title="Coordinates",
         summarizer=summarize_variable,
         expand_option_name="display_expand_coords",
@@ -454,7 +476,7 @@ def inherited_coords_repr(node: DataTree
     )
 
 
-def inline_index_repr(index: pd.Index, max_width=None):
+def inline_index_repr(index: pd.Index, max_width: int) -> str:
     if hasattr(index, "_repr_inline_"):
         repr_ = index._repr_inline_(max_width=max_width)
     else:
@@ -624,6 +646,8 @@ def short_array_repr(array):
 
     if isinstance(array, AbstractArray):
         array = array.data
+    if isinstance(array, pd.api.extensions.ExtensionArray):
+        return repr(array)
     array = to_duck_array(array)
 
     # default to lower precision so a full (abbreviated) line can fit on
@@ -647,6 +671,7 @@ def short_array_repr(array):
 def short_data_repr(array):
     """Format "data" for DataArray and Variable."""
     internal_data = getattr(array, "variable", array)._data
+
     if isinstance(array, np.ndarray):
         return short_array_repr(array)
     elif is_duck_array(internal_data):
@@ -882,7 +907,7 @@ def _diff_mapping_repr(
                     attrs_summary.append(attr_s)
 
                 temp = [
-                    "\n".join([var_s, attr_s]) if attr_s else var_s
+                    f"{var_s}\n{attr_s}" if attr_s else var_s
                     for var_s, attr_s in zip(temp, attrs_summary, strict=True)
                 ]
 
@@ -977,9 +1002,10 @@ def diff_array_repr(a, b, compat):
         ):
             summary.append(coords_diff)
 
-    if compat == "identical":
-        if attrs_diff := diff_attrs_repr(a.attrs, b.attrs, compat):
-            summary.append(attrs_diff)
+    if compat == "identical" and (
+        attrs_diff := diff_attrs_repr(a.attrs, b.attrs, compat)
+    ):
+        summary.append(attrs_diff)
 
     return "\n".join(summary)
 
@@ -1017,9 +1043,10 @@ def diff_dataset_repr(a, b, compat):
     ):
         summary.append(data_diff)
 
-    if compat == "identical":
-        if attrs_diff := diff_attrs_repr(a.attrs, b.attrs, compat):
-            summary.append(attrs_diff)
+    if compat == "identical" and (
+        attrs_diff := diff_attrs_repr(a.attrs, b.attrs, compat)
+    ):
+        summary.append(attrs_diff)
 
     return "\n".join(summary)
 
@@ -1027,15 +1054,13 @@ def diff_dataset_repr(a, b, compat):
 def diff_nodewise_summary(a: DataTree, b: DataTree, compat):
     """Iterates over all corresponding nodes, recording differences between data at each location."""
 
-    compat_str = _compat_to_str(compat)
-
     summary = []
     for path, (node_a, node_b) in group_subtrees(a, b):
         a_ds, b_ds = node_a.dataset, node_b.dataset
 
         if not a_ds._all_compat(b_ds, compat):
             path_str = "root node" if path == "." else f"node {path!r}"
-            dataset_diff = diff_dataset_repr(a_ds, b_ds, compat_str)
+            dataset_diff = diff_dataset_repr(a_ds, b_ds, compat)
             data_diff = indent(
                 "\n".join(dataset_diff.split("\n", 1)[1:]), prefix="    "
             )
@@ -1050,9 +1075,8 @@ def diff_datatree_repr(a: DataTree, b: D
         f"Left and right {type(a).__name__} objects are not {_compat_to_str(compat)}"
     ]
 
-    if compat == "identical":
-        if diff_name := diff_name_summary(a, b):
-            summary.append(diff_name)
+    if compat == "identical" and (diff_name := diff_name_summary(a, b)):
+        summary.append(diff_name)
 
     treestructure_diff = diff_treestructure(a, b)
 
@@ -1072,7 +1096,7 @@ def inherited_vars(mapping: ChainMap) ->
     return {k: v for k, v in mapping.parents.items() if k not in mapping.maps[0]}
 
 
-def _datatree_node_repr(node: DataTree, show_inherited: bool) -> str:
+def _datatree_node_repr(node: DataTree, root: bool) -> str:
     summary = [f"Group: {node.path}"]
 
     col_width = _calculate_col_width(node.variables)
@@ -1083,11 +1107,11 @@ def _datatree_node_repr(node: DataTree,
     # Only show dimensions if also showing a variable or coordinates section.
     show_dims = (
         node._node_coord_variables
-        or (show_inherited and inherited_coords)
+        or (root and inherited_coords)
         or node._data_variables
     )
 
-    dim_sizes = node.sizes if show_inherited else node._node_dims
+    dim_sizes = node.sizes if root else node._node_dims
 
     if show_dims:
         # Includes inherited dimensions.
@@ -1101,7 +1125,7 @@ def _datatree_node_repr(node: DataTree,
         node_coords = node.to_dataset(inherit=False).coords
         summary.append(coords_repr(node_coords, col_width=col_width, max_rows=max_rows))
 
-    if show_inherited and inherited_coords:
+    if root and inherited_coords:
         summary.append(
             inherited_coords_repr(node, col_width=col_width, max_rows=max_rows)
         )
@@ -1119,7 +1143,7 @@ def _datatree_node_repr(node: DataTree,
         )
 
     # TODO: only show indexes defined at this node, with a separate section for
-    # inherited indexes (if show_inherited=True)
+    # inherited indexes (if root=True)
     display_default_indexes = _get_boolean_with_default(
         "display_default_indexes", False
     )
@@ -1137,17 +1161,27 @@ def _datatree_node_repr(node: DataTree,
 
 def datatree_repr(dt: DataTree) -> str:
     """A printable representation of the structure of this entire tree."""
-    renderer = RenderDataTree(dt)
+    max_children = OPTIONS["display_max_children"]
+
+    renderer = RenderDataTree(dt, maxchildren=max_children)
 
     name_info = "" if dt.name is None else f" {dt.name!r}"
     header = f"<xarray.DataTree{name_info}>"
 
     lines = [header]
-    show_inherited = True
-    for pre, fill, node in renderer:
-        node_repr = _datatree_node_repr(node, show_inherited=show_inherited)
-        show_inherited = False  # only show inherited coords on the root
+    root = True
 
+    for pre, fill, node in renderer:
+        if isinstance(node, str):
+            lines.append(f"{fill}{node}")
+            continue
+
+        node_repr = _datatree_node_repr(node, root=root)
+        root = False  # only the first node is the root
+
+        # TODO: figure out if we can restructure this logic to move child groups
+        # up higher in the repr, directly below the <xarray.DataTree> header.
+        # This would be more consistent with the HTML repr.
         raw_repr_lines = node_repr.splitlines()
 
         node_line = f"{pre}{raw_repr_lines[0]}"
diff -pruN 2025.03.1-8/xarray/core/formatting_html.py 2025.10.1-1/xarray/core/formatting_html.py
--- 2025.03.1-8/xarray/core/formatting_html.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/formatting_html.py	2025-10-10 10:38:05.000000000 +0000
@@ -6,15 +6,17 @@ from collections.abc import Mapping
 from functools import lru_cache, partial
 from html import escape
 from importlib.resources import files
-from typing import TYPE_CHECKING
+from math import ceil
+from typing import TYPE_CHECKING, Literal
 
 from xarray.core.formatting import (
+    filter_nondefault_indexes,
     inherited_vars,
     inline_index_repr,
     inline_variable_array_repr,
     short_data_repr,
 )
-from xarray.core.options import _get_boolean_with_default
+from xarray.core.options import OPTIONS, _get_boolean_with_default
 
 STATIC_FILES = (
     ("xarray.static.html", "icons-svg-inline.html"),
@@ -115,7 +117,11 @@ def summarize_variable(name, var, is_ind
 
 def summarize_coords(variables) -> str:
     li_items = []
-    for k, v in variables.items():
+    dims = tuple(variables._data.dims)
+    dim_ordered_coords = sorted(
+        variables.items(), key=lambda x: dims.index(x[0]) if x[0] in dims else len(dims)
+    )
+    for k, v in dim_ordered_coords:
         li_content = summarize_variable(k, v, is_index=k in variables.xindexes)
         li_items.append(f"<li class='xr-var-item'>{li_content}</li>")
 
@@ -144,7 +150,7 @@ def summarize_index(coord_names, index)
     name = "<br>".join([escape(str(n)) for n in coord_names])
 
     index_id = f"index-{uuid.uuid4()}"
-    preview = escape(inline_index_repr(index))
+    preview = escape(inline_index_repr(index, max_width=70))
     details = short_index_repr_html(index)
 
     data_icon = _icon("icon-database")
@@ -192,7 +198,13 @@ def collapsible_section(
 
 
 def _mapping_section(
-    mapping, name, details_func, max_items_collapse, expand_option_name, enabled=True
+    mapping,
+    name,
+    details_func,
+    max_items_collapse,
+    expand_option_name,
+    enabled=True,
+    max_option_name: Literal["display_max_children"] | None = None,
 ) -> str:
     n_items = len(mapping)
     expanded = _get_boolean_with_default(
@@ -200,8 +212,15 @@ def _mapping_section(
     )
     collapsed = not expanded
 
+    inline_details = ""
+    if max_option_name and max_option_name in OPTIONS:
+        max_items = int(OPTIONS[max_option_name])
+        if n_items > max_items:
+            inline_details = f"({max_items}/{n_items})"
+
     return collapsible_section(
         name,
+        inline_details=inline_details,
         details=details_func(mapping),
         n_items=n_items,
         enabled=enabled,
@@ -309,24 +328,33 @@ def array_repr(arr) -> str:
         indexed_dims = {}
 
     obj_type = f"xarray.{type(arr).__name__}"
-    arr_name = f"'{arr.name}'" if getattr(arr, "name", None) else ""
+    arr_name = escape(repr(arr.name)) if getattr(arr, "name", None) else ""
 
     header_components = [
         f"<div class='xr-obj-type'>{obj_type}</div>",
-        f"<div class='xr-array-name'>{arr_name}</div>",
+        f"<div class='xr-obj-name'>{arr_name}</div>",
         format_dims(dims, indexed_dims),
     ]
 
     sections = [array_section(arr)]
 
     if hasattr(arr, "coords"):
-        sections.append(coord_section(arr.coords))
+        if arr.coords:
+            sections.append(coord_section(arr.coords))
 
     if hasattr(arr, "xindexes"):
-        indexes = _get_indexes_dict(arr.xindexes)
-        sections.append(index_section(indexes))
+        display_default_indexes = _get_boolean_with_default(
+            "display_default_indexes", False
+        )
+        xindexes = filter_nondefault_indexes(
+            _get_indexes_dict(arr.xindexes), not display_default_indexes
+        )
+        if xindexes:
+            indexes = _get_indexes_dict(arr.xindexes)
+            sections.append(index_section(indexes))
 
-    sections.append(attr_section(arr.attrs))
+    if arr.attrs:
+        sections.append(attr_section(arr.attrs))
 
     return _obj_repr(arr, header_components, sections)
 
@@ -336,65 +364,33 @@ def dataset_repr(ds) -> str:
 
     header_components = [f"<div class='xr-obj-type'>{escape(obj_type)}</div>"]
 
-    sections = [
-        dim_section(ds),
-        coord_section(ds.coords),
-        datavar_section(ds.data_vars),
-        index_section(_get_indexes_dict(ds.xindexes)),
-        attr_section(ds.attrs),
-    ]
+    sections = []
 
-    return _obj_repr(ds, header_components, sections)
+    sections.append(dim_section(ds))
 
+    if ds.coords:
+        sections.append(coord_section(ds.coords))
 
-def summarize_datatree_children(children: Mapping[str, DataTree]) -> str:
-    N_CHILDREN = len(children) - 1
+    sections.append(datavar_section(ds.data_vars))
 
-    # Get result from datatree_node_repr and wrap it
-    lines_callback = lambda n, c, end: _wrap_datatree_repr(
-        datatree_node_repr(n, c), end=end
+    display_default_indexes = _get_boolean_with_default(
+        "display_default_indexes", False
     )
-
-    children_html = "".join(
-        (
-            lines_callback(n, c, end=False)  # Long lines
-            if i < N_CHILDREN
-            else lines_callback(n, c, end=True)
-        )  # Short lines
-        for i, (n, c) in enumerate(children.items())
-    )
-
-    return "".join(
-        [
-            "<div style='display: inline-grid; grid-template-columns: 100%; grid-column: 1 / -1'>",
-            children_html,
-            "</div>",
-        ]
+    xindexes = filter_nondefault_indexes(
+        _get_indexes_dict(ds.xindexes), not display_default_indexes
     )
+    if xindexes:
+        sections.append(index_section(xindexes))
 
+    if ds.attrs:
+        sections.append(attr_section(ds.attrs))
 
-children_section = partial(
-    _mapping_section,
-    name="Groups",
-    details_func=summarize_datatree_children,
-    max_items_collapse=1,
-    expand_option_name="display_expand_groups",
-)
-
-inherited_coord_section = partial(
-    _mapping_section,
-    name="Inherited coordinates",
-    details_func=summarize_coords,
-    max_items_collapse=25,
-    expand_option_name="display_expand_coords",
-)
+    return _obj_repr(ds, header_components, sections)
 
 
-def datatree_node_repr(group_title: str, node: DataTree, show_inherited=False) -> str:
+def datatree_node_sections(node: DataTree, root: bool = False) -> list[str]:
     from xarray.core.coordinates import Coordinates
 
-    header_components = [f"<div class='xr-obj-type'>{escape(group_title)}</div>"]
-
     ds = node._to_dataset_view(rebuild_dims=False, inherit=True)
     node_coords = node.to_dataset(inherit=False).coords
 
@@ -404,94 +400,130 @@ def datatree_node_repr(group_title: str,
         indexes=inherited_vars(node._indexes),
     )
 
-    sections = [
-        children_section(node.children),
-        dim_section(ds),
-        coord_section(node_coords),
-    ]
+    # Only show dimensions if also showing a variable or coordinates section.
+    show_dims = (
+        node._node_coord_variables
+        or (root and inherited_coords)
+        or node._data_variables
+    )
+
+    sections = []
+
+    if node.children:
+        children_max_items = 1 if ds.data_vars else 6
+        sections.append(
+            children_section(node.children, max_items_collapse=children_max_items)
+        )
+
+    if show_dims:
+        sections.append(dim_section(ds))
+
+    if node_coords:
+        sections.append(coord_section(node_coords))
 
     # only show inherited coordinates on the root
-    if show_inherited:
+    if root and inherited_coords:
         sections.append(inherited_coord_section(inherited_coords))
 
-    sections += [
-        datavar_section(ds.data_vars),
-        attr_section(ds.attrs),
-    ]
+    if ds.data_vars:
+        sections.append(datavar_section(ds.data_vars))
 
-    return _obj_repr(ds, header_components, sections)
+    if ds.attrs:
+        sections.append(attr_section(ds.attrs))
 
+    return sections
 
-def _wrap_datatree_repr(r: str, end: bool = False) -> str:
-    """
-    Wrap HTML representation with a tee to the left of it.
 
-    Enclosing HTML tag is a <div> with :code:`display: inline-grid` style.
+def summarize_datatree_children(children: Mapping[str, DataTree]) -> str:
+    MAX_CHILDREN = OPTIONS["display_max_children"]
+    n_children = len(children)
 
-    Turns:
-    [    title    ]
-    |   details   |
-    |_____________|
-
-    into (A):
-    |─ [    title    ]
-    |  |   details   |
-    |  |_____________|
-
-    or (B):
-    └─ [    title    ]
-       |   details   |
-       |_____________|
-
-    Parameters
-    ----------
-    r: str
-        HTML representation to wrap.
-    end: bool
-        Specify if the line on the left should continue or end.
-
-        Default is True.
-
-    Returns
-    -------
-    str
-        Wrapped HTML representation.
+    children_html = []
+    for i, child in enumerate(children.values()):
+        if i < ceil(MAX_CHILDREN / 2) or i >= ceil(n_children - MAX_CHILDREN / 2):
+            is_last = i == (n_children - 1)
+            children_html.append(datatree_child_repr(child, end=is_last))
+        elif n_children > MAX_CHILDREN and i == ceil(MAX_CHILDREN / 2):
+            children_html.append("<div>...</div>")
 
-        Tee color is set to the variable :code:`--xr-border-color`.
-    """
-    # height of line
-    end = bool(end)
-    height = "100%" if end is False else "1.2em"
     return "".join(
         [
-            "<div style='display: inline-grid; grid-template-columns: 0px 20px auto; width: 100%;'>",
-            "<div style='",
-            "grid-column-start: 1;",
-            "border-right: 0.2em solid;",
-            "border-color: var(--xr-border-color);",
-            f"height: {height};",
-            "width: 0px;",
-            "'>",
-            "</div>",
-            "<div style='",
-            "grid-column-start: 2;",
-            "grid-row-start: 1;",
-            "height: 1em;",
-            "width: 20px;",
-            "border-bottom: 0.2em solid;",
-            "border-color: var(--xr-border-color);",
-            "'>",
-            "</div>",
-            "<div style='",
-            "grid-column-start: 3;",
-            "'>",
-            r,
-            "</div>",
+            "<div style='display: inline-grid; grid-template-columns: 100%; grid-column: 1 / -1'>",
+            "".join(children_html),
             "</div>",
         ]
     )
 
 
-def datatree_repr(dt: DataTree) -> str:
-    obj_type = f"xarray.{type(dt).__name__}"
-    return datatree_node_repr(obj_type, dt, show_inherited=True)
+children_section = partial(
+    _mapping_section,
+    name="Groups",
+    details_func=summarize_datatree_children,
+    max_option_name="display_max_children",
+    expand_option_name="display_expand_groups",
+)
+
+inherited_coord_section = partial(
+    _mapping_section,
+    name="Inherited coordinates",
+    details_func=summarize_coords,
+    max_items_collapse=25,
+    expand_option_name="display_expand_coords",
+)
+
+
+def datatree_child_repr(node: DataTree, end: bool = False) -> str:
+    # Wrap DataTree HTML representation with a tee to the left of it.
+    #
+    # Enclosing HTML tag is a <div> with :code:`display: inline-grid` style.
+    #
+    # Turns:
+    # [    title    ]
+    # |   details   |
+    # |_____________|
+    #
+    # into (A):
+    # |─ [    title    ]
+    # |  |   details   |
+    # |  |_____________|
+    #
+    # or (B):
+    # └─ [    title    ]
+    #    |   details   |
+    #    |_____________|
+    end = bool(end)
+    height = "100%" if end is False else "1.2em"  # height of line
+
+    path = escape(node.path)
+    sections = datatree_node_sections(node, root=False)
+    section_items = "".join(f"<li class='xr-section-item'>{s}</li>" for s in sections)
+
+    # TODO: Can we make the group name clickable to toggle the sections below?
+    # This looks like it would require the input/label pattern used above.
+    html = f"""
+        <div class='xr-group-box'>
+            <div class='xr-group-box-vline' style='height: {height}'></div>
+            <div class='xr-group-box-hline'></div>
+            <div class='xr-group-box-contents'>
+                <div class='xr-header'>
+                    <div class='xr-group-name'>{path}</div>
+                </div>
+                <ul class='xr-sections'>
+                    {section_items}
+                </ul>
+            </div>
+        </div>
+    """
+    return "".join(t.strip() for t in html.split("\n"))
+
+
+def datatree_repr(node: DataTree) -> str:
+    header_components = [
+        f"<div class='xr-obj-type'>xarray.{type(node).__name__}</div>",
+    ]
+    if node.name is not None:
+        name = escape(repr(node.name))
+        header_components.append(f"<div class='xr-obj-name'>{name}</div>")
+
+    sections = datatree_node_sections(node, root=True)
+    return _obj_repr(node, header_components, sections)
diff -pruN 2025.03.1-8/xarray/core/groupby.py 2025.10.1-1/xarray/core/groupby.py
--- 2025.03.1-8/xarray/core/groupby.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/groupby.py	2025-10-10 10:38:05.000000000 +0000
@@ -22,8 +22,12 @@ from xarray.core._aggregations import (
     DataArrayGroupByAggregations,
     DatasetGroupByAggregations,
 )
-from xarray.core.common import ImplementsArrayReduce, ImplementsDatasetReduce
-from xarray.core.coordinates import Coordinates, _coordinates_from_variable
+from xarray.core.common import (
+    ImplementsArrayReduce,
+    ImplementsDatasetReduce,
+    _is_numeric_aggregatable_dtype,
+)
+from xarray.core.coordinates import Coordinates, coordinates_from_variable
 from xarray.core.duck_array_ops import where
 from xarray.core.formatting import format_array_flat
 from xarray.core.indexes import (
@@ -78,7 +82,8 @@ def check_reduce_dims(reduce_dims, dimen
         if any(dim not in dimensions for dim in reduce_dims):
             raise ValueError(
                 f"cannot reduce over dimensions {reduce_dims!r}. expected either '...' "
-                f"to reduce over all dimensions or one or more of {dimensions!r}."
+                f"to reduce over all dimensions or one or more of {dimensions!r}. "
+                f"Alternatively, install the `flox` package. "
             )
 
 
@@ -206,7 +211,7 @@ class _DummyGroup(Generic[T_Xarray]):
         return np.arange(self.size, dtype=int)
 
     def __array__(
-        self, dtype: np.typing.DTypeLike = None, /, *, copy: bool | None = None
+        self, dtype: np.typing.DTypeLike | None = None, /, *, copy: bool | None = None
     ) -> np.ndarray:
         if copy is False:
             raise NotImplementedError(f"An array copy is necessary, got {copy = }.")
@@ -262,6 +267,8 @@ def _ensure_1d(
     from xarray.core.dataarray import DataArray
 
     if isinstance(group, DataArray):
+        for dim in set(group.dims) - set(obj.dims):
+            obj = obj.expand_dims(dim)
         # try to stack the dims of the group into a single dim
         orig_dims = group.dims
         stacked_dim = "stacked_" + "_".join(map(str, orig_dims))
@@ -294,7 +301,7 @@ class ResolvedGrouper(Generic[T_DataWith
     grouper: Grouper
     group: T_Group
     obj: T_DataWithCoords
-    eagerly_compute_group: bool = field(repr=False)
+    eagerly_compute_group: Literal[False] | None = field(repr=False, default=None)
 
     # returned by factorize:
     encoded: EncodedGroups = field(init=False, repr=False)
@@ -323,39 +330,38 @@ class ResolvedGrouper(Generic[T_DataWith
 
         self.group = _resolve_group(self.obj, self.group)
 
+        if self.eagerly_compute_group:
+            raise ValueError(
+                f""""Eagerly computing the DataArray you're grouping by ({self.group.name!r}) "
+                has been removed.
+                Please load this array's data manually using `.compute` or `.load`.
+                To intentionally avoid eager loading, either (1) specify
+                `.groupby({self.group.name}=UniqueGrouper(labels=...))`
+                or (2) pass explicit bin edges using ``bins`` or
+                `.groupby({self.group.name}=BinGrouper(bins=...))`; as appropriate."""
+            )
+        if self.eagerly_compute_group is not None:
+            emit_user_level_warning(
+                "Passing `eagerly_compute_group` is now deprecated. It has no effect.",
+                DeprecationWarning,
+            )
+
         if not isinstance(self.group, _DummyGroup) and is_chunked_array(
             self.group.variable._data
         ):
-            if self.eagerly_compute_group is False:
-                # This requires a pass to discover the groups present
-                if (
-                    isinstance(self.grouper, UniqueGrouper)
-                    and self.grouper.labels is None
-                ):
-                    raise ValueError(
-                        "Please pass `labels` to UniqueGrouper when grouping by a chunked array."
-                    )
-                # this requires a pass to compute the bin edges
-                if isinstance(self.grouper, BinGrouper) and isinstance(
-                    self.grouper.bins, int
-                ):
-                    raise ValueError(
-                        "Please pass explicit bin edges to BinGrouper using the ``bins`` kwarg"
-                        "when grouping by a chunked array."
-                    )
-
-            if self.eagerly_compute_group:
-                emit_user_level_warning(
-                    f""""Eagerly computing the DataArray you're grouping by ({self.group.name!r}) "
-                    is deprecated and will raise an error in v2025.05.0.
-                    Please load this array's data manually using `.compute` or `.load`.
-                    To intentionally avoid eager loading, either (1) specify
-                    `.groupby({self.group.name}=UniqueGrouper(labels=...), eagerly_load_group=False)`
-                    or (2) pass explicit bin edges using or `.groupby({self.group.name}=BinGrouper(bins=...),
-                    eagerly_load_group=False)`; as appropriate.""",
-                    DeprecationWarning,
+            # This requires a pass to discover the groups present
+            if isinstance(self.grouper, UniqueGrouper) and self.grouper.labels is None:
+                raise ValueError(
+                    "Please pass `labels` to UniqueGrouper when grouping by a chunked array."
+                )
+            # this requires a pass to compute the bin edges
+            if isinstance(self.grouper, BinGrouper) and isinstance(
+                self.grouper.bins, int
+            ):
+                raise ValueError(
+                    "Please pass explicit bin edges to BinGrouper using the ``bins`` kwarg"
+                    "when grouping by a chunked array."
                 )
-                self.group = self.group.compute()
 
         self.encoded = self.grouper.factorize(self.group)
 
@@ -381,11 +387,10 @@ def _parse_group_and_groupers(
     group: GroupInput,
     groupers: dict[str, Grouper],
     *,
-    eagerly_compute_group: bool,
+    eagerly_compute_group: Literal[False] | None,
 ) -> tuple[ResolvedGrouper, ...]:
     from xarray.core.dataarray import DataArray
-    from xarray.core.variable import Variable
-    from xarray.groupers import UniqueGrouper
+    from xarray.groupers import Grouper, UniqueGrouper
 
     if group is not None and groupers:
         raise ValueError(
@@ -400,6 +405,13 @@ def _parse_group_and_groupers(
             f"`group` must be a DataArray. Received {type(group).__name__!r} instead"
         )
 
+    if isinstance(group, Grouper):
+        raise TypeError(
+            "Cannot group by a Grouper object. "
+            f"Instead use `.groupby(var_name={type(group).__name__}(...))`. "
+            "You may need to assign the variable you're grouping by as a coordinate using `assign_coords`."
+        )
+
     if isinstance(group, Mapping):
         grouper_mapping = either_dict_or_kwargs(group, groupers, "groupby")
         group = None
@@ -531,7 +543,7 @@ class ComposedGrouper:
         _flatcodes = where(mask.data, -1, _flatcodes)
 
         full_index = pd.MultiIndex.from_product(
-            list(grouper.full_index.values for grouper in groupers),
+            [list(grouper.full_index.values) for grouper in groupers],
             names=tuple(grouper.name for grouper in groupers),
         )
         if not full_index.is_unique:
@@ -661,18 +673,26 @@ class GroupBy(Generic[T_Xarray]):
         # specification for the groupby operation
         # TODO: handle obj having variables that are not present on any of the groupers
         #       simple broadcasting fails for ExtensionArrays.
-        # FIXME: Skip this stacking when grouping by a dask array, it's useless in that case.
-        (self.group1d, self._obj, self._stacked_dim, self._inserted_dims) = _ensure_1d(
-            group=self.encoded.codes, obj=obj
-        )
-        (self._group_dim,) = self.group1d.dims
+        codes = self.encoded.codes
+        self._by_chunked = is_chunked_array(codes._variable._data)
+        if not self._by_chunked:
+            (self.group1d, self._obj, self._stacked_dim, self._inserted_dims) = (
+                _ensure_1d(group=codes, obj=obj)
+            )
+            (self._group_dim,) = self.group1d.dims
+        else:
+            self.group1d = None
+            # This transpose preserves dim order behaviour
+            self._obj = obj.transpose(..., *codes.dims)
+            self._stacked_dim = None
+            self._inserted_dims = []
+            self._group_dim = None
 
         # cached attributes
         self._groups = None
         self._dims = None
         self._sizes = None
         self._len = len(self.encoded.full_index)
-        self._by_chunked = is_chunked_array(self.encoded.codes.data)
 
     @property
     def sizes(self) -> Mapping[Hashable, int]:
@@ -730,8 +750,8 @@ class GroupBy(Generic[T_Xarray]):
         <xarray.DataArray 'a' (x: 4)> Size: 32B
         array([9., 3., 4., 5.])
         Coordinates:
-            quantile  float64 8B 0.5
           * x         (x) int64 32B 0 1 2 3
+            quantile  float64 8B 0.5
 
         See Also
         --------
@@ -807,7 +827,7 @@ class GroupBy(Generic[T_Xarray]):
             self._groups = dict(
                 zip(
                     self.encoded.unique_coord.data,
-                    self.encoded.group_indices,
+                    tuple(g for g in self.encoded.group_indices if g),
                     strict=True,
                 )
             )
@@ -817,6 +837,7 @@ class GroupBy(Generic[T_Xarray]):
         """
         Get DataArray or Dataset corresponding to a particular group label.
         """
+        self._raise_if_by_is_chunked()
         return self._obj.isel({self._group_dim: self.groups[key]})
 
     def __len__(self) -> int:
@@ -834,7 +855,10 @@ class GroupBy(Generic[T_Xarray]):
         for grouper in self.groupers:
             coord = grouper.unique_coord
             labels = ", ".join(format_array_flat(coord, 30).split())
-            text += f"\n    {grouper.name!r}: {coord.size}/{grouper.full_index.size} groups present with labels {labels}"
+            text += (
+                f"\n    {grouper.name!r}: {type(grouper.grouper).__name__}({grouper.group.name!r}), "
+                f"{coord.size}/{grouper.full_index.size} groups with labels {labels}"
+            )
         return text + ">"
 
     def _iter_grouped(self) -> Iterator[T_Xarray]:
@@ -963,13 +987,12 @@ class GroupBy(Generic[T_Xarray]):
         indexers = {}
         for grouper in self.groupers:
             index = combined._indexes.get(grouper.name, None)
-            if has_missing_groups and index is not None:
+            if (has_missing_groups and index is not None) or (
+                len(self.groupers) > 1
+                and not isinstance(grouper.full_index, pd.RangeIndex)
+                and not index.index.equals(grouper.full_index)
+            ):
                 indexers[grouper.name] = grouper.full_index
-            elif len(self.groupers) > 1:
-                if not isinstance(
-                    grouper.full_index, pd.RangeIndex
-                ) and not index.index.equals(grouper.full_index):
-                    indexers[grouper.name] = grouper.full_index
         if indexers:
             combined = combined.reindex(**indexers)
         return combined
@@ -1048,7 +1071,7 @@ class GroupBy(Generic[T_Xarray]):
                 name: var
                 for name, var in variables.items()
                 if (
-                    not (np.issubdtype(var.dtype, np.number) or (var.dtype == np.bool_))
+                    not _is_numeric_aggregatable_dtype(var)
                     # this avoids dropping any levels of a MultiIndex, which raises
                     # a warning
                     and name not in midx_grouping_vars
@@ -1072,7 +1095,7 @@ class GroupBy(Generic[T_Xarray]):
             parsed_dim_list = list()
             # preserve order
             for dim_ in itertools.chain(
-                *(grouper.group.dims for grouper in self.groupers)
+                *(grouper.codes.dims for grouper in self.groupers)
             ):
                 if dim_ not in parsed_dim_list:
                     parsed_dim_list.append(dim_)
@@ -1086,7 +1109,7 @@ class GroupBy(Generic[T_Xarray]):
         # Better to control it here than in flox.
         for grouper in self.groupers:
             if any(
-                d not in grouper.group.dims and d not in obj.dims for d in parsed_dim
+                d not in grouper.codes.dims and d not in obj.dims for d in parsed_dim
             ):
                 raise ValueError(f"cannot reduce over dimensions {dim}.")
 
@@ -1126,7 +1149,7 @@ class GroupBy(Generic[T_Xarray]):
         group_dims = set(grouper.group.dims)
         new_coords = []
         to_drop = []
-        if group_dims.issubset(set(parsed_dim)):
+        if group_dims & set(parsed_dim):
             for grouper in self.groupers:
                 output_index = grouper.full_index
                 if isinstance(output_index, pd.RangeIndex):
@@ -1138,7 +1161,7 @@ class GroupBy(Generic[T_Xarray]):
                 new_coords.append(
                     # Using IndexVariable here ensures we reconstruct PandasMultiIndex with
                     # all associated levels properly.
-                    _coordinates_from_variable(
+                    coordinates_from_variable(
                         IndexVariable(
                             dims=grouper.name,
                             data=output_index,
@@ -1290,15 +1313,15 @@ class GroupBy(Generic[T_Xarray]):
         array([[0.7, 4.2, 0.7, 1.5],
                [6.5, 7.3, 2.6, 1.9]])
         Coordinates:
+          * x         (x) int64 16B 0 1
           * y         (y) int64 32B 1 1 2 2
             quantile  float64 8B 0.0
-          * x         (x) int64 16B 0 1
         >>> ds.groupby("y").quantile(0, dim=...)
         <xarray.Dataset> Size: 40B
         Dimensions:   (y: 2)
         Coordinates:
-            quantile  float64 8B 0.0
           * y         (y) int64 16B 1 2
+            quantile  float64 8B 0.0
         Data variables:
             a         (y) float64 16B 0.7 0.7
         >>> da.groupby("x").quantile([0, 0.5, 1])
@@ -1313,15 +1336,15 @@ class GroupBy(Generic[T_Xarray]):
                 [2.6 , 2.6 , 2.6 ],
                 [1.9 , 1.9 , 1.9 ]]])
         Coordinates:
+          * x         (x) int64 16B 0 1
           * y         (y) int64 32B 1 1 2 2
           * quantile  (quantile) float64 24B 0.0 0.5 1.0
-          * x         (x) int64 16B 0 1
         >>> ds.groupby("y").quantile([0, 0.5, 1], dim=...)
         <xarray.Dataset> Size: 88B
         Dimensions:   (y: 2, quantile: 3)
         Coordinates:
-          * quantile  (quantile) float64 24B 0.0 0.5 1.0
           * y         (y) int64 16B 1 2
+          * quantile  (quantile) float64 24B 0.0 0.5 1.0
         Data variables:
             a         (y, quantile) float64 48B 0.7 5.35 8.4 0.7 2.25 9.4
 
@@ -1331,9 +1354,6 @@ class GroupBy(Generic[T_Xarray]):
            "Sample quantiles in statistical packages,"
            The American Statistician, 50(4), pp. 361-365, 1996
         """
-        if dim is None:
-            dim = (self._group_dim,)
-
         # Dataset.quantile does this, do it for flox to ensure same output.
         q = np.asarray(q, dtype=np.float64)
 
@@ -1348,11 +1368,13 @@ class GroupBy(Generic[T_Xarray]):
             )
             return result
         else:
+            if dim is None:
+                dim = (self._group_dim,)
             return self.map(
                 self._obj.__class__.quantile,
                 shortcut=False,
                 q=q,
-                dim=dim,
+                dim=dim or self._group_dim,
                 method=method,
                 keep_attrs=keep_attrs,
                 skipna=skipna,
@@ -1491,6 +1513,7 @@ class DataArrayGroupByBase(GroupBy["Data
 
     @property
     def dims(self) -> tuple[Hashable, ...]:
+        self._raise_if_by_is_chunked()
         if self._dims is None:
             index = self.encoded.group_indices[0]
             self._dims = self._obj.isel({self._group_dim: index}).dims
@@ -1608,7 +1631,14 @@ class DataArrayGroupByBase(GroupBy["Data
         if shortcut:
             combined = self._concat_shortcut(applied, dim, positions)
         else:
-            combined = concat(applied, dim)
+            combined = concat(
+                applied,
+                dim,
+                data_vars="all",
+                coords="different",
+                compat="equals",
+                join="outer",
+            )
             combined = _maybe_reorder(combined, dim, positions, N=self.group1d.size)
 
         if isinstance(combined, type(self._obj)):
@@ -1702,6 +1732,7 @@ class DatasetGroupByBase(GroupBy["Datase
 
     @property
     def dims(self) -> Frozen[Hashable, int]:
+        self._raise_if_by_is_chunked()
         if self._dims is None:
             index = self.encoded.group_indices[0]
             self._dims = self._obj.isel({self._group_dim: index}).dims
@@ -1768,7 +1799,14 @@ class DatasetGroupByBase(GroupBy["Datase
         """Recombine the applied objects like the original."""
         applied_example, applied = peek_at(applied)
         dim, positions = self._infer_concat_args(applied_example)
-        combined = concat(applied, dim)
+        combined = concat(
+            applied,
+            dim,
+            data_vars="all",
+            coords="different",
+            compat="equals",
+            join="outer",
+        )
         combined = _maybe_reorder(combined, dim, positions, N=self.group1d.size)
         # assign coord when the applied function does not return that coord
         if dim not in applied_example.dims:
diff -pruN 2025.03.1-8/xarray/core/indexes.py 2025.10.1-1/xarray/core/indexes.py
--- 2025.03.1-8/xarray/core/indexes.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/indexes.py	2025-10-10 10:38:05.000000000 +0000
@@ -2,15 +2,17 @@ from __future__ import annotations
 
 import collections.abc
 import copy
+import inspect
 from collections import defaultdict
-from collections.abc import Hashable, Iterable, Iterator, Mapping, Sequence
-from typing import TYPE_CHECKING, Any, Generic, TypeVar, cast
+from collections.abc import Callable, Hashable, Iterable, Iterator, Mapping, Sequence
+from typing import TYPE_CHECKING, Any, Generic, TypeVar, cast, overload
 
 import numpy as np
 import pandas as pd
 
 from xarray.core import formatting, nputils, utils
 from xarray.core.coordinate_transform import CoordinateTransform
+from xarray.core.extension_array import PandasExtensionArray
 from xarray.core.indexing import (
     CoordinateTransformIndexingAdapter,
     IndexSelResult,
@@ -21,6 +23,7 @@ from xarray.core.utils import (
     Frozen,
     emit_user_level_warning,
     get_valid_numpy_dtype,
+    is_allowed_extension_array_dtype,
     is_dict_like,
     is_scalar,
 )
@@ -195,6 +198,49 @@ class Index:
         else:
             return {}
 
+    def should_add_coord_to_array(
+        self,
+        name: Hashable,
+        var: Variable,
+        dims: set[Hashable],
+    ) -> bool:
+        """Define whether or not an index coordinate variable should be added to
+        a new DataArray.
+
+        This method is called repeatedly for each Variable associated with this
+        index when creating a new DataArray (via its constructor or from a
+        Dataset) or updating an existing one. The variables associated with this
+        index are the ones passed to :py:meth:`Index.from_variables` and/or
+        returned by :py:meth:`Index.create_variables`.
+
+        By default returns ``True`` if the dimensions of the coordinate variable
+        are a subset of the array dimensions and ``False`` otherwise (DataArray
+        model). This default behavior may be overridden in Index subclasses to
+        bypass strict conformance with the DataArray model. This is useful, for
+        example, to include the (n+1)-dimensional cell boundary coordinate
+        associated with an interval index.
+
+        Returning ``False`` will either:
+
+        - raise a :py:class:`CoordinateValidationError` when passing the
+          coordinate directly to a new or an existing DataArray, e.g., via
+          ``DataArray.__init__()`` or ``DataArray.assign_coords()``
+
+        - drop the coordinate (and therefore drop the index) when a new
+          DataArray is constructed by indexing a Dataset
+
+        Parameters
+        ----------
+        name : Hashable
+            Name of a coordinate variable associated with this index.
+        var : Variable
+            Coordinate variable object.
+        dims : set of Hashable
+            Dimensions of the new DataArray object being created.
+
+        """
+        return all(d in dims for d in var.dims)
+
     def to_pandas_index(self) -> pd.Index:
         """Cast this xarray index to a pandas.Index object or raise a
         ``TypeError`` if this is not supported.
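To illustrate the new ``Index.should_add_coord_to_array`` hook added in the hunk above, here is a minimal, hypothetical sketch (the ``BoundsIndex`` class, the ``x_bounds`` coordinate name and the ``bnds`` dimension are illustrative assumptions, not part of this changeset) of a subclass that keeps an (n+1)-dimensional cell-boundary coordinate attached to a DataArray even though its extra dimension is not an array dimension:

from collections.abc import Hashable

from xarray import Variable
from xarray.indexes import PandasIndex


class BoundsIndex(PandasIndex):
    """Hypothetical index that also manages an ``x_bounds`` cell-boundary coordinate."""

    def should_add_coord_to_array(
        self,
        name: Hashable,
        var: Variable,
        dims: set[Hashable],
    ) -> bool:
        if name == "x_bounds":
            # keep the (n+1)-dimensional bounds coordinate even though its
            # extra "bnds" dimension is not one of the DataArray's dimensions
            return True
        # otherwise fall back to the default DataArray-model check
        return super().should_add_coord_to_array(name, var, dims)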
@@ -304,7 +350,15 @@ class Index:
         """
         raise NotImplementedError(f"{self!r} doesn't support re-indexing labels")
 
-    def equals(self, other: Index) -> bool:
+    @overload
+    def equals(self, other: Index) -> bool: ...
+
+    @overload
+    def equals(
+        self, other: Index, *, exclude: frozenset[Hashable] | None = None
+    ) -> bool: ...
+
+    def equals(self, other: Index, **kwargs) -> bool:
         """Compare this index with another index of the same type.
 
         Implementation is optional but required in order to support alignment.
@@ -313,11 +367,22 @@ class Index:
         ----------
         other : Index
             The other Index object to compare with this object.
+        exclude : frozenset of hashable, optional
+            Dimensions excluded from checking. It is None by default (i.e.,
+            when this method is not called in the context of alignment). For an
+            n-dimensional index this option allows an Index to ignore any
+            dimension in ``exclude`` when comparing ``self`` with ``other``.
+            For a 1-dimensional index this kwarg can be safely ignored, as this
+            method is not called when all of the index's dimensions are also
+            excluded from alignment (note: the index's dimensions correspond to
+            the union of the dimensions of all coordinate variables associated
+            with this index).
 
         Returns
         -------
         is_equal : bool
             ``True`` if the indexes are equal, ``False`` otherwise.
+
         """
         raise NotImplementedError()
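A minimal sketch of the updated ``equals`` signature for a third-party index (the ``BoundingBoxIndex`` class and its ``bounds`` attribute are hypothetical, for illustration only): dimensions listed in ``exclude`` are skipped during the comparison, while ``exclude=None`` means the method is not being called from the alignment machinery.

from collections.abc import Hashable

from xarray.indexes import Index


class BoundingBoxIndex(Index):
    """Hypothetical n-dimensional index defined by per-dimension bounds."""

    def __init__(self, bounds: dict[Hashable, tuple[float, float]]):
        self.bounds = bounds

    def equals(
        self, other: Index, *, exclude: frozenset[Hashable] | None = None
    ) -> bool:
        if not isinstance(other, BoundingBoxIndex):
            return False
        if exclude is None:
            # not called in the context of alignment: compare all dimensions
            exclude = frozenset()
        dims_to_check = set(self.bounds) - exclude
        return all(self.bounds[d] == other.bounds.get(d) for d in dims_to_check)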
 
@@ -415,7 +480,7 @@ class Index:
     def __getitem__(self, indexer: Any) -> Self:
         raise NotImplementedError()
 
-    def _repr_inline_(self, max_width):
+    def _repr_inline_(self, max_width: int) -> str:
         return self.__class__.__name__
 
 
@@ -444,6 +509,8 @@ def safe_cast_to_index(array: Any) -> pd
     from xarray.core.variable import Variable
     from xarray.namedarray.pycompat import to_numpy
 
+    if isinstance(array, PandasExtensionArray):
+        array = pd.Index(array.array)
     if isinstance(array, pd.Index):
         index = array
     elif isinstance(array, DataArray | Variable):
@@ -600,9 +667,12 @@ class PandasIndex(Index):
 
         self.index = index
         self.dim = dim
-
         if coord_dtype is None:
-            coord_dtype = get_valid_numpy_dtype(index)
+            if is_allowed_extension_array_dtype(index.dtype):
+                cast(pd.api.extensions.ExtensionDtype, index.dtype)
+                coord_dtype = index.dtype
+            else:
+                coord_dtype = get_valid_numpy_dtype(index)
         self.coord_dtype = coord_dtype
 
     def _replace(self, index, dim=None, coord_dtype=None):
@@ -647,7 +717,7 @@ class PandasIndex(Index):
 
         # preserve wrapped pd.Index (if any)
         # accessing `.data` can load data from disk, so we only access if needed
-        data = var._data.array if hasattr(var._data, "array") else var.data
+        data = var._data if isinstance(var._data, PandasIndexingAdapter) else var.data  # type: ignore[redundant-expr]
         # multi-index level variable: get level index
         if isinstance(var._data, PandasMultiIndexingAdapter):
             level = var._data.level
@@ -699,7 +769,11 @@ class PandasIndex(Index):
         if not indexes:
             coord_dtype = None
         else:
-            coord_dtype = np.result_type(*[idx.coord_dtype for idx in indexes])
+            indexes_coord_dtypes = {idx.coord_dtype for idx in indexes}
+            if len(indexes_coord_dtypes) == 1:
+                coord_dtype = next(iter(indexes_coord_dtypes))
+            else:
+                coord_dtype = np.result_type(*indexes_coord_dtypes)
 
         return cls(new_pd_index, dim=dim, coord_dtype=coord_dtype)
 
@@ -743,7 +817,7 @@ class PandasIndex(Index):
             # scalar indexer: drop index
             return None
 
-        return self._replace(self.index[indxr])  # type: ignore[index]
+        return self._replace(self.index[indxr])  # type: ignore[index,unused-ignore]
 
     def sel(
         self, labels: dict[Any, Any], method=None, tolerance=None
@@ -778,23 +852,18 @@ class PandasIndex(Index):
                             "'tolerance' is not supported when indexing using a CategoricalIndex."
                         )
                     indexer = self.index.get_loc(label_value)
+                elif method is not None:
+                    indexer = get_indexer_nd(self.index, label_array, method, tolerance)
+                    if np.any(indexer < 0):
+                        raise KeyError(f"not all values found in index {coord_name!r}")
                 else:
-                    if method is not None:
-                        indexer = get_indexer_nd(
-                            self.index, label_array, method, tolerance
-                        )
-                        if np.any(indexer < 0):
-                            raise KeyError(
-                                f"not all values found in index {coord_name!r}"
-                            )
-                    else:
-                        try:
-                            indexer = self.index.get_loc(label_value)
-                        except KeyError as e:
-                            raise KeyError(
-                                f"not all values found in index {coord_name!r}. "
-                                "Try setting the `method` keyword argument (example: method='nearest')."
-                            ) from e
+                    try:
+                        indexer = self.index.get_loc(label_value)
+                    except KeyError as e:
+                        raise KeyError(
+                            f"not all values found in index {coord_name!r}. "
+                            "Try setting the `method` keyword argument (example: method='nearest')."
+                        ) from e
 
             elif label_array.dtype.kind == "b":
                 indexer = label_array
@@ -811,7 +880,7 @@ class PandasIndex(Index):
 
         return IndexSelResult({self.dim: indexer})
 
-    def equals(self, other: Index):
+    def equals(self, other: Index, *, exclude: frozenset[Hashable] | None = None):
         if not isinstance(other, PandasIndex):
             return False
         return self.index.equals(other.index) and self.dim == other.dim
@@ -940,7 +1009,7 @@ class PandasMultiIndex(PandasIndex):
     index: pd.MultiIndex
     dim: Hashable
     coord_dtype: Any
-    level_coords_dtype: dict[str, Any]
+    level_coords_dtype: dict[Hashable | None, Any]
 
     __slots__ = ("coord_dtype", "dim", "index", "level_coords_dtype")
 
@@ -983,7 +1052,7 @@ class PandasMultiIndex(PandasIndex):
         dim = next(iter(variables.values())).dims[0]
 
         index = pd.MultiIndex.from_arrays(
-            [var.values for var in variables.values()], names=variables.keys()
+            [var.values for var in variables.values()], names=list(variables.keys())
         )
         index.name = dim
         level_coords_dtype = {name: var.dtype for name, var in variables.items()}
@@ -1039,7 +1108,7 @@ class PandasMultiIndex(PandasIndex):
         # https://github.com/pandas-dev/pandas/issues/14672
         if all(index.is_monotonic_increasing for index in level_indexes):
             index = pd.MultiIndex.from_product(
-                level_indexes, sortorder=0, names=variables.keys()
+                level_indexes, sortorder=0, names=list(variables.keys())
             )
         else:
             split_labels, levels = zip(
@@ -1049,7 +1118,7 @@ class PandasMultiIndex(PandasIndex):
             labels = [x.ravel().tolist() for x in labels_mesh]
 
             index = pd.MultiIndex(
-                levels=levels, codes=labels, sortorder=0, names=variables.keys()
+                levels=levels, codes=labels, sortorder=0, names=list(variables.keys())
             )
         level_coords_dtype = {k: var.dtype for k, var in variables.items()}
 
@@ -1123,7 +1192,8 @@ class PandasMultiIndex(PandasIndex):
             level_variables[name] = var
 
         codes_as_lists = [list(x) for x in codes]
-        index = pd.MultiIndex(levels=levels, codes=codes_as_lists, names=names)
+        levels_as_lists = [list(level) for level in levels]
+        index = pd.MultiIndex(levels=levels_as_lists, codes=codes_as_lists, names=names)
         level_coords_dtype = {k: var.dtype for k, var in level_variables.items()}
         obj = cls(index, dim, level_coords_dtype=level_coords_dtype)
         index_vars = obj.create_variables(level_variables)
@@ -1178,9 +1248,9 @@ class PandasMultiIndex(PandasIndex):
                 dtype = None
             else:
                 level = name
-                dtype = self.level_coords_dtype[name]  # type: ignore[index]  # TODO: are Hashables ok?
+                dtype = self.level_coords_dtype[name]
 
-            var = variables.get(name, None)
+            var = variables.get(name)
             if var is not None:
                 attrs = var.attrs
                 encoding = var.encoding
@@ -1329,7 +1399,7 @@ class PandasMultiIndex(PandasIndex):
             # variable(s) attrs and encoding metadata are propagated
             # when replacing the indexes in the resulting xarray object
             new_vars = new_index.create_variables()
-            indexes = cast(dict[Any, Index], {k: new_index for k in new_vars})
+            indexes = cast(dict[Any, Index], dict.fromkeys(new_vars, new_index))
 
             # add scalar variable for each dropped level
             variables = new_vars
@@ -1386,14 +1456,15 @@ class PandasMultiIndex(PandasIndex):
 class CoordinateTransformIndex(Index):
     """Helper class for creating Xarray indexes based on coordinate transforms.
 
-    EXPERIMENTAL (not ready for public use yet).
-
     - wraps a :py:class:`CoordinateTransform` instance
     - takes care of creating the index (lazy) coordinates
     - supports point-wise label-based selection
     - supports exact alignment only, by comparing indexes based on their transform
       (not on their explicit coordinate labels)
 
+    .. caution::
+        This API is experimental and subject to change. Please report any bugs or surprising
+        behaviour you encounter.
     """
 
     transform: CoordinateTransform
@@ -1490,18 +1561,33 @@ class CoordinateTransformIndex(Index):
 
         return IndexSelResult(results)
 
-    def equals(self, other: Index) -> bool:
+    def equals(
+        self, other: Index, *, exclude: frozenset[Hashable] | None = None
+    ) -> bool:
         if not isinstance(other, CoordinateTransformIndex):
             return False
-        return self.transform.equals(other.transform)
+        return self.transform.equals(other.transform, exclude=exclude)
 
     def rename(
         self,
         name_dict: Mapping[Any, Hashable],
         dims_dict: Mapping[Any, Hashable],
     ) -> Self:
-        # TODO: maybe update self.transform coord_names, dim_size and dims attributes
-        return self
+        coord_names = self.transform.coord_names
+        dims = self.transform.dims
+        dim_size = self.transform.dim_size
+
+        if not set(coord_names) & set(name_dict) and not set(dims) & set(dims_dict):
+            return self
+
+        new_transform = copy.deepcopy(self.transform)
+        new_transform.coord_names = tuple(name_dict.get(n, n) for n in coord_names)
+        new_transform.dims = tuple(str(dims_dict.get(d, d)) for d in dims)
+        new_transform.dim_size = {
+            str(dims_dict.get(d, d)): v for d, v in dim_size.items()
+        }
+
+        return type(self)(new_transform)
 
 
 def create_default_index_implicit(
@@ -1518,7 +1604,7 @@ def create_default_index_implicit(
     if all_variables is None:
         all_variables = {}
     if not isinstance(all_variables, Mapping):
-        all_variables = {k: None for k in all_variables}
+        all_variables = dict.fromkeys(all_variables)
 
     name = dim_variable.dims[0]
     array = getattr(dim_variable._data, "array", None)
@@ -1570,7 +1656,7 @@ class Indexes(collections.abc.Mapping, G
 
     """
 
-    _index_type: type[Index] | type[pd.Index]
+    _index_type: type[Index | pd.Index]
     _indexes: dict[Any, T_PandasOrXarrayIndex]
     _variables: dict[Any, Variable]
 
@@ -1588,7 +1674,7 @@ class Indexes(collections.abc.Mapping, G
         self,
         indexes: Mapping[Any, T_PandasOrXarrayIndex] | None = None,
         variables: Mapping[Any, Variable] | None = None,
-        index_type: type[Index] | type[pd.Index] = Index,
+        index_type: type[Index | pd.Index] = Index,
     ):
         """Constructor not for public consumption.
 
@@ -1809,7 +1895,7 @@ class Indexes(collections.abc.Mapping, G
             if convert_new_idx:
                 new_idx = new_idx.index  # type: ignore[attr-defined]
 
-            new_indexes.update({k: new_idx for k in coords})
+            new_indexes.update(dict.fromkeys(coords, new_idx))
             new_index_vars.update(idx_vars)
 
         return new_indexes, new_index_vars
@@ -1855,11 +1941,41 @@ def default_indexes(
         if name in dims and var.ndim == 1:
             index, index_vars = create_default_index_implicit(var, coords)
             if set(index_vars) <= coord_names:
-                indexes.update({k: index for k in index_vars})
+                indexes.update(dict.fromkeys(index_vars, index))
 
     return indexes
 
 
+def _wrap_index_equals(
+    index: Index,
+) -> Callable[[Index, frozenset[Hashable]], bool]:
+    # TODO: remove this Index.equals() wrapper (backward compatibility)
+
+    sig = inspect.signature(index.equals)
+
+    if len(sig.parameters) == 1:
+        index_cls_name = type(index).__module__ + "." + type(index).__qualname__
+        emit_user_level_warning(
+            f"the signature ``{index_cls_name}.equals(self, other)`` is deprecated. "
+            f"Please update it to "
+            f"``{index_cls_name}.equals(self, other, *, exclude=None)`` "
+            f"or kindly ask the maintainers of ``{index_cls_name}`` to do it. "
+            "See documentation of xarray.Index.equals() for more info.",
+            FutureWarning,
+        )
+        exclude_kwarg = False
+    else:
+        exclude_kwarg = True
+
+    def equals_wrapper(other: Index, exclude: frozenset[Hashable]) -> bool:
+        if exclude_kwarg:
+            return index.equals(other, exclude=exclude)
+        else:
+            return index.equals(other)
+
+    return equals_wrapper
+
+
 def indexes_equal(
     index: Index,
     other_index: Index,
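A small standalone sketch of the signature check that the ``_wrap_index_equals`` shim above relies on (the ``accepts_exclude`` helper is hypothetical, for illustration only): on a bound ``equals`` method ``self`` is not counted, so a single remaining parameter means the deprecated ``equals(self, other)`` form, while the new form also exposes the keyword-only ``exclude``.

import inspect

from xarray.indexes import Index, PandasIndex


def accepts_exclude(index: Index) -> bool:
    # a single parameter left on the bound method means ``equals(self, other)``
    return len(inspect.signature(index.equals).parameters) > 1


# should print True, given the updated PandasIndex.equals signature in this diff
print(accepts_exclude(PandasIndex([0, 1, 2], dim="x")))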
@@ -1901,6 +2017,7 @@ def indexes_equal(
 
 def indexes_all_equal(
     elements: Sequence[tuple[Index, dict[Hashable, Variable]]],
+    exclude_dims: frozenset[Hashable],
 ) -> bool:
     """Check if indexes are all equal.
 
@@ -1925,9 +2042,11 @@ def indexes_all_equal(
 
     same_type = all(type(indexes[0]) is type(other_idx) for other_idx in indexes[1:])
     if same_type:
+        index_equals_func = _wrap_index_equals(indexes[0])
         try:
             not_equal = any(
-                not indexes[0].equals(other_idx) for other_idx in indexes[1:]
+                not index_equals_func(other_idx, exclude_dims)
+                for other_idx in indexes[1:]
             )
         except NotImplementedError:
             not_equal = check_variables()
@@ -1958,7 +2077,7 @@ def _apply_indexes_fast(indexes: Indexes
         if index_args:
             new_index = getattr(index, func)(index_args)
             if new_index is not None:
-                new_indexes.update({k: new_index for k in index_vars})
+                new_indexes.update(dict.fromkeys(index_vars, new_index))
                 new_index_vars = new_index.create_variables(index_vars)
                 new_index_variables.update(new_index_vars)
             else:
@@ -1981,7 +2100,7 @@ def _apply_indexes(
         if index_args:
             new_index = getattr(index, func)(index_args)
             if new_index is not None:
-                new_indexes.update({k: new_index for k in index_vars})
+                new_indexes.update(dict.fromkeys(index_vars, new_index))
                 new_index_vars = new_index.create_variables(index_vars)
                 new_index_variables.update(new_index_vars)
             else:
diff -pruN 2025.03.1-8/xarray/core/indexing.py 2025.10.1-1/xarray/core/indexing.py
--- 2025.03.1-8/xarray/core/indexing.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/indexing.py	2025-10-10 10:38:05.000000000 +0000
@@ -9,39 +9,44 @@ from collections.abc import Callable, Ha
 from contextlib import suppress
 from dataclasses import dataclass, field
 from datetime import timedelta
-from html import escape
-from typing import TYPE_CHECKING, Any, overload
+from typing import TYPE_CHECKING, Any, cast, overload
 
 import numpy as np
 import pandas as pd
+from numpy.typing import DTypeLike
 from packaging.version import Version
 
 from xarray.core import duck_array_ops
 from xarray.core.coordinate_transform import CoordinateTransform
 from xarray.core.nputils import NumpyVIndexAdapter
-from xarray.core.options import OPTIONS
 from xarray.core.types import T_Xarray
 from xarray.core.utils import (
     NDArrayMixin,
     either_dict_or_kwargs,
     get_valid_numpy_dtype,
+    is_allowed_extension_array,
+    is_allowed_extension_array_dtype,
     is_duck_array,
     is_duck_dask_array,
+    is_full_slice,
     is_scalar,
+    is_valid_numpy_dtype,
     to_0d_array,
 )
 from xarray.namedarray.parallelcompat import get_chunked_array_type
 from xarray.namedarray.pycompat import array_type, integer_types, is_chunked_array
 
 if TYPE_CHECKING:
-    from numpy.typing import DTypeLike
-
+    from xarray.core.extension_array import PandasExtensionArray
     from xarray.core.indexes import Index
     from xarray.core.types import Self
     from xarray.core.variable import Variable
     from xarray.namedarray._typing import _Shape, duckarray
     from xarray.namedarray.parallelcompat import ChunkManagerEntrypoint
 
+BasicIndexerType = int | np.integer | slice
+OuterIndexerType = BasicIndexerType | np.ndarray[Any, np.dtype[np.integer]]
+
 
 @dataclass
 class IndexSelResult:
@@ -240,16 +245,16 @@ def expanded_indexer(key, ndim):
     return tuple(new_key)
 
 
-def _normalize_slice(sl: slice, size: int) -> slice:
+def normalize_slice(sl: slice, size: int) -> slice:
     """
     Ensure that given slice only contains positive start and stop values
     (stop can be -1 for full-size slices with negative steps, e.g. [-10::-1])
 
     Examples
     --------
-    >>> _normalize_slice(slice(0, 9), 10)
+    >>> normalize_slice(slice(0, 9), 10)
     slice(0, 9, 1)
-    >>> _normalize_slice(slice(0, -1), 10)
+    >>> normalize_slice(slice(0, -1), 10)
     slice(0, 9, 1)
     """
     return slice(*sl.indices(size))
@@ -266,7 +271,7 @@ def _expand_slice(slice_: slice, size: i
     >>> _expand_slice(slice(0, -1), 10)
     array([0, 1, 2, 3, 4, 5, 6, 7, 8])
     """
-    sl = _normalize_slice(slice_, size)
+    sl = normalize_slice(slice_, size)
     return np.arange(sl.start, sl.stop, sl.step)
 
 
@@ -275,14 +280,14 @@ def slice_slice(old_slice: slice, applie
     index it with another slice to return a new slice equivalent to applying
     the slices sequentially
     """
-    old_slice = _normalize_slice(old_slice, size)
+    old_slice = normalize_slice(old_slice, size)
 
     size_after_old_slice = len(range(old_slice.start, old_slice.stop, old_slice.step))
     if size_after_old_slice == 0:
         # nothing left after applying first slice
         return slice(0)
 
-    applied_slice = _normalize_slice(applied_slice, size_after_old_slice)
+    applied_slice = normalize_slice(applied_slice, size_after_old_slice)
 
     start = old_slice.start + applied_slice.start * old_slice.step
     if start < 0:
@@ -299,19 +304,83 @@ def slice_slice(old_slice: slice, applie
     return slice(start, stop, step)
 
 
-def _index_indexer_1d(old_indexer, applied_indexer, size: int):
-    if isinstance(applied_indexer, slice) and applied_indexer == slice(None):
+def normalize_array(
+    array: np.ndarray[Any, np.dtype[np.integer]], size: int
+) -> np.ndarray[Any, np.dtype[np.integer]]:
+    """
+    Ensure that the given array only contains positive values.
+
+    Examples
+    --------
+    >>> normalize_array(np.array([-1, -2, -3, -4]), 10)
+    array([9, 8, 7, 6])
+    >>> normalize_array(np.array([-5, 3, 5, -1, 8]), 12)
+    array([ 7,  3,  5, 11,  8])
+    """
+    if np.issubdtype(array.dtype, np.unsignedinteger):
+        return array
+
+    return np.where(array >= 0, array, array + size)
+
+
+def slice_slice_by_array(
+    old_slice: slice,
+    array: np.ndarray[Any, np.dtype[np.integer]],
+    size: int,
+) -> np.ndarray[Any, np.dtype[np.integer]]:
+    """Given a slice and the size of the dimension to which it will be applied,
+    index it with an array to return a new array equivalent to applying
+    the slices sequentially
+
+    Examples
+    --------
+    >>> slice_slice_by_array(slice(2, 10), np.array([1, 3, 5]), 12)
+    array([3, 5, 7])
+    >>> slice_slice_by_array(slice(1, None, 2), np.array([1, 3, 7, 8]), 20)
+    array([ 3,  7, 15, 17])
+    >>> slice_slice_by_array(slice(None, None, -1), np.array([2, 4, 7]), 20)
+    array([17, 15, 12])
+    """
+    # to get a concrete slice, limited to the size of the array
+    normalized_slice = normalize_slice(old_slice, size)
+
+    size_after_slice = len(range(*normalized_slice.indices(size)))
+    normalized_array = normalize_array(array, size_after_slice)
+
+    new_indexer = normalized_array * normalized_slice.step + normalized_slice.start
+
+    if np.any(new_indexer >= size):
+        raise IndexError("indices out of bounds")  # TODO: more helpful error message
+
+    return new_indexer
+
+
+def _index_indexer_1d(
+    old_indexer: OuterIndexerType,
+    applied_indexer: OuterIndexerType,
+    size: int,
+) -> OuterIndexerType:
+    if is_full_slice(applied_indexer):
         # shortcut for the usual case
         return old_indexer
+    if is_full_slice(old_indexer):
+        # shortcut for full slices
+        return applied_indexer
+
+    indexer: OuterIndexerType
     if isinstance(old_indexer, slice):
         if isinstance(applied_indexer, slice):
             indexer = slice_slice(old_indexer, applied_indexer, size)
         elif isinstance(applied_indexer, integer_types):
-            indexer = range(*old_indexer.indices(size))[applied_indexer]  # type: ignore[assignment]
+            indexer = range(*old_indexer.indices(size))[applied_indexer]
         else:
-            indexer = _expand_slice(old_indexer, size)[applied_indexer]
-    else:
+            indexer = slice_slice_by_array(old_indexer, applied_indexer, size)
+    elif isinstance(old_indexer, np.ndarray):
         indexer = old_indexer[applied_indexer]
+    else:
+        # should be unreachable
+        raise ValueError("cannot index integers. Please open an issue.")
+
     return indexer
 
 
@@ -388,7 +457,7 @@ class BasicIndexer(ExplicitIndexer):
 
     __slots__ = ()
 
-    def __init__(self, key: tuple[int | np.integer | slice, ...]):
+    def __init__(self, key: tuple[BasicIndexerType, ...]):
         if not isinstance(key, tuple):
             raise TypeError(f"key must be a tuple: {key!r}")
 
@@ -420,16 +489,14 @@ class OuterIndexer(ExplicitIndexer):
 
     def __init__(
         self,
-        key: tuple[
-            int | np.integer | slice | np.ndarray[Any, np.dtype[np.generic]], ...
-        ],
+        key: tuple[BasicIndexerType | np.ndarray[Any, np.dtype[np.generic]], ...],
     ):
         if not isinstance(key, tuple):
             raise TypeError(f"key must be a tuple: {key!r}")
 
         new_key = []
         for k in key:
-            if isinstance(k, integer_types):
+            if isinstance(k, integer_types) and not isinstance(k, bool):
                 k = int(k)
             elif isinstance(k, slice):
                 k = as_integer_slice(k)
@@ -443,10 +510,10 @@ class OuterIndexer(ExplicitIndexer):
                         f"invalid indexer array for {type(self).__name__}; must be scalar "
                         f"or have 1 dimension: {k!r}"
                     )
-                k = k.astype(np.int64)  # type: ignore[union-attr]
+                k = duck_array_ops.astype(k, np.int64, copy=False)
             else:
                 raise TypeError(
-                    f"unexpected indexer type for {type(self).__name__}: {k!r}"
+                    f"unexpected indexer type for {type(self).__name__}: {k!r}, {type(k)}"
                 )
             new_key.append(k)
 
@@ -487,7 +554,7 @@ class VectorizedIndexer(ExplicitIndexer)
                         "invalid indexer key: ndarray arguments "
                         f"have different numbers of dimensions: {ndims}"
                     )
-                k = k.astype(np.int64)  # type: ignore[union-attr]
+                k = duck_array_ops.astype(k, np.int64, copy=False)
             else:
                 raise TypeError(
                     f"unexpected indexer type for {type(self).__name__}: {k!r}"
@@ -503,7 +570,7 @@ class ExplicitlyIndexed:
     __slots__ = ()
 
     def __array__(
-        self, dtype: np.typing.DTypeLike = None, /, *, copy: bool | None = None
+        self, dtype: DTypeLike | None = None, /, *, copy: bool | None = None
     ) -> np.ndarray:
         # Leave casting to an array up to the underlying array type.
         if Version(np.__version__) >= Version("2.0.0"):
@@ -519,8 +586,10 @@ class ExplicitlyIndexedNDArrayMixin(NDAr
     __slots__ = ()
 
     def get_duck_array(self):
-        key = BasicIndexer((slice(None),) * self.ndim)
-        return self[key]
+        raise NotImplementedError
+
+    async def async_get_duck_array(self):
+        raise NotImplementedError
 
     def _oindex_get(self, indexer: OuterIndexer):
         raise NotImplementedError(
@@ -558,6 +627,22 @@ class ExplicitlyIndexedNDArrayMixin(NDAr
         return IndexCallable(self._vindex_get, self._vindex_set)
 
 
+class IndexingAdapter(ExplicitlyIndexedNDArrayMixin):
+    """Marker class for indexing adapters.
+
+    These classes translate between Xarray's indexing semantics and the underlying array's
+    indexing semantics.
+    """
+
+    def get_duck_array(self):
+        key = BasicIndexer((slice(None),) * self.ndim)
+        return self[key]
+
+    async def async_get_duck_array(self):
+        """These classes are applied to in-memory arrays, so specific async support isn't needed."""
+        return self.get_duck_array()
+
+
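As a quick usage sketch of the new ``IndexingAdapter`` base class (internal API, shown here only to illustrate the inherited behaviour): subclasses such as ``NumpyIndexingAdapter`` inherit ``get_duck_array``/``async_get_duck_array``, both of which simply apply a full-slice ``BasicIndexer`` to the wrapped in-memory array.

import numpy as np

from xarray.core.indexing import BasicIndexer, NumpyIndexingAdapter

adapter = NumpyIndexingAdapter(np.arange(6).reshape(2, 3))

# the inherited get_duck_array() is equivalent to indexing with a full slice
full = adapter[BasicIndexer((slice(None), slice(None)))]
assert np.array_equal(adapter.get_duck_array(), full)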
 class ImplicitToExplicitIndexingAdapter(NDArrayMixin):
     """Wrap an array, converting tuples into the indicated explicit indexer."""
 
@@ -568,7 +653,7 @@ class ImplicitToExplicitIndexingAdapter(
         self.indexer_cls = indexer_cls
 
     def __array__(
-        self, dtype: np.typing.DTypeLike = None, /, *, copy: bool | None = None
+        self, dtype: DTypeLike | None = None, /, *, copy: bool | None = None
     ) -> np.ndarray:
         if Version(np.__version__) >= Version("2.0.0"):
             return np.asarray(self.get_duck_array(), dtype=dtype, copy=copy)
@@ -609,8 +694,8 @@ class LazilyIndexedArray(ExplicitlyIndex
         """
         if isinstance(array, type(self)) and key is None:
             # unwrap
-            key = array.key  # type: ignore[has-type]
-            array = array.array  # type: ignore[has-type]
+            key = array.key  # type: ignore[has-type, unused-ignore]
+            array = array.array  # type: ignore[has-type, unused-ignore]
 
         if key is None:
             key = BasicIndexer((slice(None),) * array.ndim)
@@ -628,7 +713,8 @@ class LazilyIndexedArray(ExplicitlyIndex
 
     def _updated_key(self, new_key: ExplicitIndexer) -> BasicIndexer | OuterIndexer:
         iter_new_key = iter(expanded_indexer(new_key.tuple, self.ndim))
-        full_key = []
+
+        full_key: list[OuterIndexerType] = []
         for size, k in zip(self.array.shape, self.key.tuple, strict=True):
             if isinstance(k, integer_types):
                 full_key.append(k)
@@ -637,7 +723,7 @@ class LazilyIndexedArray(ExplicitlyIndex
         full_key_tuple = tuple(full_key)
 
         if all(isinstance(k, integer_types + (slice,)) for k in full_key_tuple):
-            return BasicIndexer(full_key_tuple)
+            return BasicIndexer(cast(tuple[BasicIndexerType, ...], full_key_tuple))
         return OuterIndexer(full_key_tuple)
 
     @property
@@ -645,19 +731,25 @@ class LazilyIndexedArray(ExplicitlyIndex
         return self._shape
 
     def get_duck_array(self):
-        if isinstance(self.array, ExplicitlyIndexedNDArrayMixin):
-            array = apply_indexer(self.array, self.key)
-        else:
-            # If the array is not an ExplicitlyIndexedNDArrayMixin,
-            # it may wrap a BackendArray so use its __getitem__
+        from xarray.backends.common import BackendArray
+
+        if isinstance(self.array, BackendArray):
             array = self.array[self.key]
+        else:
+            array = apply_indexer(self.array, self.key)
+            if isinstance(array, ExplicitlyIndexed):
+                array = array.get_duck_array()
+        return _wrap_numpy_scalars(array)
+
+    async def async_get_duck_array(self):
+        from xarray.backends.common import BackendArray
 
-        # self.array[self.key] is now a numpy array when
-        # self.array is a BackendArray subclass
-        # and self.key is BasicIndexer((slice(None, None, None),))
-        # so we need the explicit check for ExplicitlyIndexed
-        if isinstance(array, ExplicitlyIndexed):
-            array = array.get_duck_array()
+        if isinstance(self.array, BackendArray):
+            array = await self.array.async_getitem(self.key)
+        else:
+            array = apply_indexer(self.array, self.key)
+            if isinstance(array, ExplicitlyIndexed):
+                array = await array.async_get_duck_array()
         return _wrap_numpy_scalars(array)
 
     def transpose(self, order):
@@ -721,18 +813,25 @@ class LazilyVectorizedIndexedArray(Expli
         return np.broadcast(*self.key.tuple).shape
 
     def get_duck_array(self):
-        if isinstance(self.array, ExplicitlyIndexedNDArrayMixin):
+        from xarray.backends.common import BackendArray
+
+        if isinstance(self.array, BackendArray):
+            array = self.array[self.key]
+        else:
             array = apply_indexer(self.array, self.key)
+            if isinstance(array, ExplicitlyIndexed):
+                array = array.get_duck_array()
+        return _wrap_numpy_scalars(array)
+
+    async def async_get_duck_array(self):
+        from xarray.backends.common import BackendArray
+
+        if isinstance(self.array, BackendArray):
+            array = await self.array.async_getitem(self.key)
         else:
-            # If the array is not an ExplicitlyIndexedNDArrayMixin,
-            # it may wrap a BackendArray so use its __getitem__
-            array = self.array[self.key]
-        # self.array[self.key] is now a numpy array when
-        # self.array is a BackendArray subclass
-        # and self.key is BasicIndexer((slice(None, None, None),))
-        # so we need the explicit check for ExplicitlyIndexed
-        if isinstance(array, ExplicitlyIndexed):
-            array = array.get_duck_array()
+            array = apply_indexer(self.array, self.key)
+            if isinstance(array, ExplicitlyIndexed):
+                array = await array.async_get_duck_array()
         return _wrap_numpy_scalars(array)
 
     def _updated_key(self, new_key: ExplicitIndexer):
@@ -768,7 +867,15 @@ class LazilyVectorizedIndexedArray(Expli
 
 def _wrap_numpy_scalars(array):
     """Wrap NumPy scalars in 0d arrays."""
-    if np.isscalar(array):
+    ndim = duck_array_ops.ndim(array)
+    if ndim == 0 and (
+        isinstance(array, np.generic)
+        or not (is_duck_array(array) or isinstance(array, NDArrayMixin))
+    ):
+        return np.array(array)
+    elif hasattr(array, "dtype"):
+        return array
+    elif ndim == 0:
         return np.array(array)
     else:
         return array
@@ -789,6 +896,9 @@ class CopyOnWriteArray(ExplicitlyIndexed
     def get_duck_array(self):
         return self.array.get_duck_array()
 
+    async def async_get_duck_array(self):
+        return await self.array.async_get_duck_array()
+
     def _oindex_get(self, indexer: OuterIndexer):
         return type(self)(_wrap_numpy_scalars(self.array.oindex[indexer]))
 
@@ -829,12 +939,17 @@ class MemoryCachedArray(ExplicitlyIndexe
     def __init__(self, array):
         self.array = _wrap_numpy_scalars(as_indexable(array))
 
-    def _ensure_cached(self):
-        self.array = as_indexable(self.array.get_duck_array())
-
     def get_duck_array(self):
-        self._ensure_cached()
-        return self.array.get_duck_array()
+        duck_array = self.array.get_duck_array()
+        # ensure the array object is cached in-memory
+        self.array = as_indexable(duck_array)
+        return duck_array
+
+    async def async_get_duck_array(self):
+        duck_array = await self.array.async_get_duck_array()
+        # ensure the array object is cached in-memory
+        self.array = as_indexable(duck_array)
+        return duck_array
 
     def _oindex_get(self, indexer: OuterIndexer):
         return type(self)(_wrap_numpy_scalars(self.array.oindex[indexer]))
@@ -1019,6 +1134,21 @@ def explicit_indexing_adapter(
     return result
 
 
+async def async_explicit_indexing_adapter(
+    key: ExplicitIndexer,
+    shape: _Shape,
+    indexing_support: IndexingSupport,
+    raw_indexing_method: Callable[..., Any],
+) -> Any:
+    raw_key, numpy_indices = decompose_indexer(key, shape, indexing_support)
+    result = await raw_indexing_method(raw_key.tuple)
+    if numpy_indices.tuple:
+        # index the loaded duck array
+        indexable = as_indexable(result)
+        result = apply_indexer(indexable, numpy_indices)
+    return result
+
+
 def apply_indexer(indexable, indexer: ExplicitIndexer):
     """Apply an indexer to an indexable object."""
     if isinstance(indexer, VectorizedIndexer):
@@ -1121,8 +1251,8 @@ def _decompose_vectorized_indexer(
     if indexing_support is IndexingSupport.VECTORIZED:
         return indexer, BasicIndexer(())
 
-    backend_indexer_elems = []
-    np_indexer_elems = []
+    backend_indexer_elems: list[slice | np.ndarray[Any, np.dtype[np.generic]]] = []
+    np_indexer_elems: list[slice | np.ndarray[Any, np.dtype[np.generic]]] = []
     # convert negative indices
     indexer_elems = [
         np.where(k < 0, k + s, k) if isinstance(k, np.ndarray) else k
@@ -1257,7 +1387,7 @@ def _decompose_outer_indexer(
             elif isinstance(k, integer_types):
                 backend_indexer.append(k)
             else:  # slice:  convert positive step slice for backend
-                bk_slice, np_slice = _decompose_slice(k, s)
+                bk_slice, np_slice = _decompose_slice(cast(slice, k), s)
                 backend_indexer.append(bk_slice)
                 np_indexer.append(np_slice)
 
@@ -1295,7 +1425,7 @@ def _decompose_outer_indexer(
         elif isinstance(k, integer_types):
             backend_indexer.append(k)
         else:  # slice:  convert positive step slice for backend
-            bk_slice, np_slice = _decompose_slice(k, s)
+            bk_slice, np_slice = _decompose_slice(cast(slice, k), s)
             backend_indexer.append(bk_slice)
             np_indexer.append(np_slice)
 
@@ -1509,7 +1639,7 @@ def is_fancy_indexer(indexer: Any) -> bo
     """Return False if indexer is a int, slice, a 1-dimensional list, or a 0 or
     1-dimensional ndarray; in all other cases return True
     """
-    if isinstance(indexer, int | slice):
+    if isinstance(indexer, int | slice) and not isinstance(indexer, bool):
         return False
     if isinstance(indexer, np.ndarray):
         return indexer.ndim > 1
@@ -1518,7 +1648,7 @@ def is_fancy_indexer(indexer: Any) -> bo
     return True
 
 
-class NumpyIndexingAdapter(ExplicitlyIndexedNDArrayMixin):
+class NumpyIndexingAdapter(IndexingAdapter):
     """Wrap a NumPy array to use explicit indexing."""
 
     __slots__ = ("array",)
@@ -1597,7 +1727,7 @@ class NdArrayLikeIndexingAdapter(NumpyIn
         self.array = array
 
 
-class ArrayApiIndexingAdapter(ExplicitlyIndexedNDArrayMixin):
+class ArrayApiIndexingAdapter(IndexingAdapter):
     """Wrap an array API array to use explicit indexing."""
 
     __slots__ = ("array",)
@@ -1641,11 +1771,7 @@ class ArrayApiIndexingAdapter(Explicitly
 
 
 def _apply_vectorized_indexer_dask_wrapper(indices, coord):
-    from xarray.core.indexing import (
-        VectorizedIndexer,
-        apply_indexer,
-        as_indexable,
-    )
+    from xarray.core.indexing import VectorizedIndexer, apply_indexer, as_indexable
 
     return apply_indexer(
         as_indexable(coord), VectorizedIndexer((indices.squeeze(axis=-1),))
@@ -1662,7 +1788,7 @@ def _assert_not_chunked_indexer(idxr: tu
         )
 
 
-class DaskIndexingAdapter(ExplicitlyIndexedNDArrayMixin):
+class DaskIndexingAdapter(IndexingAdapter):
     """Wrap a dask array to support explicit indexing."""
 
     __slots__ = ("array",)
@@ -1738,34 +1864,63 @@ class DaskIndexingAdapter(ExplicitlyInde
         return self.array.transpose(order)
 
 
-class PandasIndexingAdapter(ExplicitlyIndexedNDArrayMixin):
+class PandasIndexingAdapter(IndexingAdapter):
     """Wrap a pandas.Index to preserve dtypes and handle explicit indexing."""
 
     __slots__ = ("_dtype", "array")
 
     array: pd.Index
-    _dtype: np.dtype
+    _dtype: np.dtype | pd.api.extensions.ExtensionDtype
 
-    def __init__(self, array: pd.Index, dtype: DTypeLike = None):
+    def __init__(
+        self,
+        array: pd.Index,
+        dtype: DTypeLike | pd.api.extensions.ExtensionDtype | None = None,
+    ):
         from xarray.core.indexes import safe_cast_to_index
 
         self.array = safe_cast_to_index(array)
 
         if dtype is None:
-            self._dtype = get_valid_numpy_dtype(array)
+            if is_allowed_extension_array(array):
+                cast(pd.api.extensions.ExtensionDtype, array.dtype)
+                self._dtype = array.dtype
+            else:
+                self._dtype = get_valid_numpy_dtype(array)
+        elif is_allowed_extension_array_dtype(dtype):
+            self._dtype = cast(pd.api.extensions.ExtensionDtype, dtype)
         else:
-            self._dtype = np.dtype(dtype)
+            self._dtype = np.dtype(cast(DTypeLike, dtype))
 
     @property
-    def dtype(self) -> np.dtype:
+    def _in_memory(self) -> bool:
+        # prevent costly conversion of a memory-saving pd.RangeIndex into a
+        # large numpy array.
+        return not isinstance(self.array, pd.RangeIndex)
+
+    @property
+    def dtype(self) -> np.dtype | pd.api.extensions.ExtensionDtype:  # type: ignore[override]
         return self._dtype
 
+    def _get_numpy_dtype(self, dtype: np.typing.DTypeLike | None = None) -> np.dtype:
+        if dtype is None:
+            if is_valid_numpy_dtype(self.dtype):
+                return cast(np.dtype, self.dtype)
+            else:
+                return get_valid_numpy_dtype(self.array)
+        else:
+            return np.dtype(dtype)
+
     def __array__(
-        self, dtype: np.typing.DTypeLike = None, /, *, copy: bool | None = None
+        self,
+        dtype: np.typing.DTypeLike | None = None,
+        /,
+        *,
+        copy: bool | None = None,
     ) -> np.ndarray:
-        if dtype is None:
-            dtype = self.dtype
+        dtype = self._get_numpy_dtype(dtype)
         array = self.array
+
         if isinstance(array, pd.PeriodIndex):
             with suppress(AttributeError):
                 # this might not be public API
@@ -1776,14 +1931,21 @@ class PandasIndexingAdapter(ExplicitlyIn
         else:
             return np.asarray(array.values, dtype=dtype)
 
-    def get_duck_array(self) -> np.ndarray:
+    def get_duck_array(self) -> np.ndarray | PandasExtensionArray:
+        # We return a PandasExtensionArray wrapper type that satisfies
+        # duck array protocols.
+        # `NumpyExtensionArray` is excluded
+        if is_allowed_extension_array(self.array):
+            from xarray.core.extension_array import PandasExtensionArray
+
+            return PandasExtensionArray(self.array.array)
         return np.asarray(self)
 
     @property
     def shape(self) -> _Shape:
         return (len(self.array),)
 
-    def _convert_scalar(self, item):
+    def _convert_scalar(self, item) -> np.ndarray:
         if item is pd.NaT:
             # work around the impossibility of casting NaT with asarray
             # note: it probably would be better in general to return
@@ -1799,95 +1961,61 @@ class PandasIndexingAdapter(ExplicitlyIn
             # numpy fails to convert pd.Timestamp to np.datetime64[ns]
             item = np.asarray(item.to_datetime64())
         elif self.dtype != object:
-            item = np.asarray(item, dtype=self.dtype)
+            dtype = self._get_numpy_dtype()
+            item = np.asarray(item, dtype=dtype)
 
         # as for numpy.ndarray indexing, we always want the result to be
         # a NumPy array.
         return to_0d_array(item)
 
-    def _prepare_key(self, key: Any | tuple[Any, ...]) -> tuple[Any, ...]:
-        if isinstance(key, tuple) and len(key) == 1:
+    def _index_get(
+        self, indexer: ExplicitIndexer, func_name: str
+    ) -> PandasIndexingAdapter | np.ndarray:
+        key = indexer.tuple
+
+        if len(key) == 1:
             # unpack key so it can index a pandas.Index object (pandas.Index
             # objects don't like tuples)
             (key,) = key
 
-        return key
+        # if multidimensional key, convert the index to numpy array and index the latter
+        if getattr(key, "ndim", 0) > 1:
+            indexable = NumpyIndexingAdapter(np.asarray(self))
+            return getattr(indexable, func_name)(indexer)
+
+        # otherwise index the pandas index then re-wrap or convert the result
+        result = self.array[key]
 
-    def _handle_result(
-        self, result: Any
-    ) -> (
-        PandasIndexingAdapter
-        | NumpyIndexingAdapter
-        | np.ndarray
-        | np.datetime64
-        | np.timedelta64
-    ):
         if isinstance(result, pd.Index):
             return type(self)(result, dtype=self.dtype)
         else:
             return self._convert_scalar(result)
 
-    def _oindex_get(
-        self, indexer: OuterIndexer
-    ) -> (
-        PandasIndexingAdapter
-        | NumpyIndexingAdapter
-        | np.ndarray
-        | np.datetime64
-        | np.timedelta64
-    ):
-        key = self._prepare_key(indexer.tuple)
-
-        if getattr(key, "ndim", 0) > 1:  # Return np-array if multidimensional
-            indexable = NumpyIndexingAdapter(np.asarray(self))
-            return indexable.oindex[indexer]
-
-        result = self.array[key]
-
-        return self._handle_result(result)
+    def _oindex_get(self, indexer: OuterIndexer) -> PandasIndexingAdapter | np.ndarray:
+        return self._index_get(indexer, "_oindex_get")
 
     def _vindex_get(
         self, indexer: VectorizedIndexer
-    ) -> (
-        PandasIndexingAdapter
-        | NumpyIndexingAdapter
-        | np.ndarray
-        | np.datetime64
-        | np.timedelta64
-    ):
+    ) -> PandasIndexingAdapter | np.ndarray:
         _assert_not_chunked_indexer(indexer.tuple)
-        key = self._prepare_key(indexer.tuple)
-
-        if getattr(key, "ndim", 0) > 1:  # Return np-array if multidimensional
-            indexable = NumpyIndexingAdapter(np.asarray(self))
-            return indexable.vindex[indexer]
-
-        result = self.array[key]
-
-        return self._handle_result(result)
+        return self._index_get(indexer, "_vindex_get")
 
     def __getitem__(
         self, indexer: ExplicitIndexer
-    ) -> (
-        PandasIndexingAdapter
-        | NumpyIndexingAdapter
-        | np.ndarray
-        | np.datetime64
-        | np.timedelta64
-    ):
-        key = self._prepare_key(indexer.tuple)
-
-        if getattr(key, "ndim", 0) > 1:  # Return np-array if multidimensional
-            indexable = NumpyIndexingAdapter(np.asarray(self))
-            return indexable[indexer]
-
-        result = self.array[key]
-
-        return self._handle_result(result)
+    ) -> PandasIndexingAdapter | np.ndarray:
+        return self._index_get(indexer, "__getitem__")
 
     def transpose(self, order) -> pd.Index:
         return self.array  # self.array should be always one-dimensional
 
+    def _repr_inline_(self, max_width: int) -> str:
+        # we want to display values in the inline repr for lazy coordinates too
+        # (pd.RangeIndex and pd.MultiIndex). `format_array_flat` prevents loading
+        # the whole array in memory.
+        from xarray.core.formatting import format_array_flat
+
+        return format_array_flat(self, max_width)
+
     def __repr__(self) -> str:
         return f"{type(self).__name__}(array={self.array!r}, dtype={self.dtype!r})"
 
@@ -1902,6 +2030,14 @@ class PandasIndexingAdapter(ExplicitlyIn
         array = self.array.copy(deep=True) if deep else self.array
         return type(self)(array, self._dtype)
 
+    @property
+    def nbytes(self) -> int:
+        if is_allowed_extension_array(self.array):
+            return self.array.nbytes
+
+        dtype = self._get_numpy_dtype()
+        return dtype.itemsize * len(self.array)
+
 
 class PandasMultiIndexingAdapter(PandasIndexingAdapter):
     """Handles explicit indexing for a pandas.MultiIndex.
@@ -1914,23 +2050,27 @@ class PandasMultiIndexingAdapter(PandasI
     __slots__ = ("_dtype", "adapter", "array", "level")
 
     array: pd.MultiIndex
-    _dtype: np.dtype
+    _dtype: np.dtype | pd.api.extensions.ExtensionDtype
     level: str | None
 
     def __init__(
         self,
         array: pd.MultiIndex,
-        dtype: DTypeLike = None,
+        dtype: DTypeLike | pd.api.extensions.ExtensionDtype | None = None,
         level: str | None = None,
     ):
         super().__init__(array, dtype)
         self.level = level
 
     def __array__(
-        self, dtype: np.typing.DTypeLike = None, /, *, copy: bool | None = None
+        self,
+        dtype: DTypeLike | None = None,
+        /,
+        *,
+        copy: bool | None = None,
     ) -> np.ndarray:
-        if dtype is None:
-            dtype = self.dtype
+        dtype = self._get_numpy_dtype(dtype)
+
         if self.level is not None:
             return np.asarray(
                 self.array.get_level_values(self.level).values, dtype=dtype
@@ -1938,45 +2078,26 @@ class PandasMultiIndexingAdapter(PandasI
         else:
             return super().__array__(dtype, copy=copy)
 
-    def _convert_scalar(self, item):
+    @property
+    def _in_memory(self) -> bool:
+        # The pd.MultiIndex's data is fully in memory, but it has a different
+        # layout than the level and dimension coordinate arrays. Marking this
+        # adapter class as a "lazy" array will prevent costly conversion when,
+        # e.g., formatting the Xarray reprs.
+        return False
+
+    def _convert_scalar(self, item: Any):
         if isinstance(item, tuple) and self.level is not None:
             idx = tuple(self.array.names).index(self.level)
             item = item[idx]
         return super()._convert_scalar(item)
 
-    def _oindex_get(
-        self, indexer: OuterIndexer
-    ) -> (
-        PandasIndexingAdapter
-        | NumpyIndexingAdapter
-        | np.ndarray
-        | np.datetime64
-        | np.timedelta64
-    ):
-        result = super()._oindex_get(indexer)
-        if isinstance(result, type(self)):
-            result.level = self.level
-        return result
-
-    def _vindex_get(
-        self, indexer: VectorizedIndexer
-    ) -> (
-        PandasIndexingAdapter
-        | NumpyIndexingAdapter
-        | np.ndarray
-        | np.datetime64
-        | np.timedelta64
-    ):
-        result = super()._vindex_get(indexer)
-        if isinstance(result, type(self)):
-            result.level = self.level
-        return result
-
-    def __getitem__(self, indexer: ExplicitIndexer):
-        result = super().__getitem__(indexer)
+    def _index_get(
+        self, indexer: ExplicitIndexer, func_name: str
+    ) -> PandasIndexingAdapter | np.ndarray:
+        result = super()._index_get(indexer, func_name)
         if isinstance(result, type(self)):
             result.level = self.level
-
         return result
 
     def __repr__(self) -> str:
@@ -1988,31 +2109,11 @@ class PandasMultiIndexingAdapter(PandasI
             )
             return f"{type(self).__name__}{props}"
 
-    def _get_array_subset(self) -> np.ndarray:
-        # used to speed-up the repr for big multi-indexes
-        threshold = max(100, OPTIONS["display_values_threshold"] + 2)
-        if self.size > threshold:
-            pos = threshold // 2
-            indices = np.concatenate([np.arange(0, pos), np.arange(-pos, 0)])
-            subset = self[OuterIndexer((indices,))]
-        else:
-            subset = self
-
-        return np.asarray(subset)
-
     def _repr_inline_(self, max_width: int) -> str:
-        from xarray.core.formatting import format_array_flat
-
         if self.level is None:
             return "MultiIndex"
         else:
-            return format_array_flat(self._get_array_subset(), max_width)
-
-    def _repr_html_(self) -> str:
-        from xarray.core.formatting import short_array_repr
-
-        array_repr = short_array_repr(self._get_array_subset())
-        return f"<pre>{escape(array_repr)}</pre>"
+            return super()._repr_inline_(max_width=max_width)
 
     def copy(self, deep: bool = True) -> Self:
         # see PandasIndexingAdapter.copy
@@ -2020,7 +2121,7 @@ class PandasMultiIndexingAdapter(PandasI
         return type(self)(array, self._dtype, self.level)
 
 
-class CoordinateTransformIndexingAdapter(ExplicitlyIndexedNDArrayMixin):
+class CoordinateTransformIndexingAdapter(IndexingAdapter):
     """Wrap a CoordinateTransform as a lazy coordinate array.
 
     Supports explicit indexing (both outer and vectorized).
@@ -2049,6 +2150,10 @@ class CoordinateTransformIndexingAdapter
     def shape(self) -> tuple[int, ...]:
         return tuple(self._transform.dim_size.values())
 
+    @property
+    def _in_memory(self) -> bool:
+        return False
+
     def get_duck_array(self) -> np.ndarray:
         all_coords = self._transform.generate_coords(dims=self._dims)
         return np.asarray(all_coords[self._coord_name])
@@ -2103,29 +2208,15 @@ class CoordinateTransformIndexingAdapter
         )
 
     def transpose(self, order: Iterable[int]) -> Self:
-        new_dims = tuple([self._dims[i] for i in order])
+        new_dims = tuple(self._dims[i] for i in order)
         return type(self)(self._transform, self._coord_name, new_dims)
 
     def __repr__(self: Any) -> str:
         return f"{type(self).__name__}(transform={self._transform!r})"
 
-    def _get_array_subset(self) -> np.ndarray:
-        threshold = max(100, OPTIONS["display_values_threshold"] + 2)
-        if self.size > threshold:
-            pos = threshold // 2
-            flat_indices = np.concatenate(
-                [np.arange(0, pos), np.arange(self.size - pos, self.size)]
-            )
-            subset = self.vindex[
-                VectorizedIndexer(np.unravel_index(flat_indices, self.shape))
-            ]
-        else:
-            subset = self
-
-        return np.asarray(subset)
-
     def _repr_inline_(self, max_width: int) -> str:
-        """Good to see some labels even for a lazy coordinate."""
+        # we want to display values in the inline repr for this lazy coordinate;
+        # `format_array_flat` prevents loading the whole array into memory.
         from xarray.core.formatting import format_array_flat
 
-        return format_array_flat(self._get_array_subset(), max_width)
+        return format_array_flat(self, max_width)
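
A minimal sketch of what the inline-repr changes above are meant to enable, assuming an xarray version that includes them: a coordinate backed by a pandas index (e.g. a pd.RangeIndex) shows a few leading/trailing values in the Dataset repr via format_array_flat, without materializing the whole index (per the comments in the diff).

import pandas as pd
import xarray as xr

# The "x" coordinate wraps a pd.RangeIndex; printing the Dataset formats a
# handful of values for "x" inline rather than converting the full index.
ds = xr.Dataset(coords={"x": pd.RangeIndex(1_000_000)})
print(ds)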
diff -pruN 2025.03.1-8/xarray/core/missing.py 2025.10.1-1/xarray/core/missing.py
--- 2025.03.1-8/xarray/core/missing.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/missing.py	2025-10-10 10:38:05.000000000 +0000
@@ -364,11 +364,10 @@ def interp_na(
             # Convert to float
             max_gap = timedelta_to_numeric(max_gap)
 
-        if not use_coordinate:
-            if not isinstance(max_gap, Number | np.number):
-                raise TypeError(
-                    f"Expected integer or floating point max_gap since use_coordinate=False. Received {max_type}."
-                )
+        if not use_coordinate and not isinstance(max_gap, Number | np.number):
+            raise TypeError(
+                f"Expected integer or floating point max_gap since use_coordinate=False. Received {max_type}."
+            )
 
     # method
     index = get_clean_interp_index(self, dim, use_coordinate=use_coordinate)
@@ -499,7 +498,7 @@ def _get_interpolator(
     # take higher dimensional data but scipy.interp1d can.
     if (
         method == "linear"
-        and not kwargs.get("fill_value") == "extrapolate"
+        and kwargs.get("fill_value") != "extrapolate"
         and not vectorizeable_only
     ):
         kwargs.update(method=method)
diff -pruN 2025.03.1-8/xarray/core/options.py 2025.10.1-1/xarray/core/options.py
--- 2025.03.1-8/xarray/core/options.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/options.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 import warnings
+from collections.abc import Sequence
 from typing import TYPE_CHECKING, Any, Literal, TypedDict
 
 from xarray.core.utils import FrozenDict
@@ -13,6 +14,7 @@ if TYPE_CHECKING:
         "chunk_manager",
         "cmap_divergent",
         "cmap_sequential",
+        "display_max_children",
         "display_max_rows",
         "display_values_threshold",
         "display_style",
@@ -27,8 +29,10 @@ if TYPE_CHECKING:
         "enable_cftimeindex",
         "file_cache_maxsize",
         "keep_attrs",
+        "netcdf_engine_order",
         "warn_for_unclosed_files",
         "use_bottleneck",
+        "use_new_combine_kwarg_defaults",
         "use_numbagg",
         "use_opt_einsum",
         "use_flox",
@@ -40,23 +44,26 @@ if TYPE_CHECKING:
         chunk_manager: str
         cmap_divergent: str | Colormap
         cmap_sequential: str | Colormap
+        display_max_children: int
         display_max_rows: int
         display_values_threshold: int
         display_style: Literal["text", "html"]
         display_width: int
-        display_expand_attrs: Literal["default", True, False]
-        display_expand_coords: Literal["default", True, False]
-        display_expand_data_vars: Literal["default", True, False]
-        display_expand_data: Literal["default", True, False]
-        display_expand_groups: Literal["default", True, False]
-        display_expand_indexes: Literal["default", True, False]
-        display_default_indexes: Literal["default", True, False]
+        display_expand_attrs: Literal["default"] | bool
+        display_expand_coords: Literal["default"] | bool
+        display_expand_data_vars: Literal["default"] | bool
+        display_expand_data: Literal["default"] | bool
+        display_expand_groups: Literal["default"] | bool
+        display_expand_indexes: Literal["default"] | bool
+        display_default_indexes: Literal["default"] | bool
         enable_cftimeindex: bool
         file_cache_maxsize: int
-        keep_attrs: Literal["default", True, False]
+        keep_attrs: Literal["default"] | bool
+        netcdf_engine_order: Sequence[Literal["netcdf4", "h5netcdf", "scipy"]]
         warn_for_unclosed_files: bool
         use_bottleneck: bool
         use_flox: bool
+        use_new_combine_kwarg_defaults: bool
         use_numbagg: bool
         use_opt_einsum: bool
 
@@ -67,6 +74,7 @@ OPTIONS: T_Options = {
     "chunk_manager": "dask",
     "cmap_divergent": "RdBu_r",
     "cmap_sequential": "viridis",
+    "display_max_children": 6,
     "display_max_rows": 12,
     "display_values_threshold": 200,
     "display_style": "html",
@@ -81,15 +89,18 @@ OPTIONS: T_Options = {
     "enable_cftimeindex": True,
     "file_cache_maxsize": 128,
     "keep_attrs": "default",
+    "netcdf_engine_order": ("netcdf4", "h5netcdf", "scipy"),
     "warn_for_unclosed_files": False,
     "use_bottleneck": True,
     "use_flox": True,
+    "use_new_combine_kwarg_defaults": False,
     "use_numbagg": True,
     "use_opt_einsum": True,
 }
 
 _JOIN_OPTIONS = frozenset(["inner", "outer", "left", "right", "exact"])
 _DISPLAY_OPTIONS = frozenset(["text", "html"])
+_NETCDF_ENGINES = frozenset(["netcdf4", "h5netcdf", "scipy"])
 
 
 def _positive_integer(value: Any) -> bool:
@@ -99,6 +110,7 @@ def _positive_integer(value: Any) -> boo
 _VALIDATORS = {
     "arithmetic_broadcast": lambda value: isinstance(value, bool),
     "arithmetic_join": _JOIN_OPTIONS.__contains__,
+    "display_max_children": _positive_integer,
     "display_max_rows": _positive_integer,
     "display_values_threshold": _positive_integer,
     "display_style": _DISPLAY_OPTIONS.__contains__,
@@ -112,7 +124,9 @@ _VALIDATORS = {
     "enable_cftimeindex": lambda value: isinstance(value, bool),
     "file_cache_maxsize": _positive_integer,
     "keep_attrs": lambda choice: choice in [True, False, "default"],
+    "netcdf_engine_order": lambda engines: set(engines) <= _NETCDF_ENGINES,
     "use_bottleneck": lambda value: isinstance(value, bool),
+    "use_new_combine_kwarg_defaults": lambda value: isinstance(value, bool),
     "use_numbagg": lambda value: isinstance(value, bool),
     "use_opt_einsum": lambda value: isinstance(value, bool),
     "use_flox": lambda value: isinstance(value, bool),
@@ -222,6 +236,8 @@ class set_options:
         * ``True`` : to always expand indexes
         * ``False`` : to always collapse indexes
         * ``default`` : to expand unless over a pre-defined limit (always collapse for html style)
+    display_max_children : int, default: 6
+        Maximum number of children to display for each node in a DataTree.
     display_max_rows : int, default: 12
         Maximum display rows.
     display_values_threshold : int, default: 200
@@ -244,12 +260,26 @@ class set_options:
         * ``False`` : to always discard attrs
         * ``default`` : to use original logic that attrs should only
           be kept in unambiguous circumstances
+    netcdf_engine_order : sequence, default: ['netcdf4', 'h5netcdf', 'scipy']
+        Preference order of backend engines to use when reading or writing
+        netCDF files with ``open_dataset()`` and ``to_netcdf()`` if ``engine``
+        is not explicitly specified. May be any permutation or subset of
+        ``['netcdf4', 'h5netcdf', 'scipy']``.
     use_bottleneck : bool, default: True
         Whether to use ``bottleneck`` to accelerate 1D reductions and
         1D rolling reduction operations.
     use_flox : bool, default: True
         Whether to use ``numpy_groupies`` and `flox`` to
         accelerate groupby and resampling reductions.
+    use_new_combine_kwarg_defaults : bool, default: False
+        Whether to use new kwarg default values for combine functions:
+        :py:func:`~xarray.concat`, :py:func:`~xarray.merge`,
+        :py:func:`~xarray.open_mfdataset`. New values are:
+
+        * ``data_vars``: None
+        * ``coords``: "minimal"
+        * ``compat``: "override"
+        * ``join``: "exact"
     use_numbagg : bool, default: True
         Whether to use ``numbagg`` to accelerate reductions.
         Takes precedence over ``use_bottleneck`` when both are True.
@@ -292,6 +322,8 @@ class set_options:
                     expected = f"Expected one of {_JOIN_OPTIONS!r}"
                 elif k == "display_style":
                     expected = f"Expected one of {_DISPLAY_OPTIONS!r}"
+                elif k == "netcdf_engine_order":
+                    expected = f"Expected a subset of {sorted(_NETCDF_ENGINES)}"
                 else:
                     expected = ""
                 raise ValueError(
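
An illustrative sketch of the options introduced above (requires an xarray release that ships them; the runtime effects are taken from the docstrings, not verified here):

import xarray as xr

with xr.set_options(
    display_max_children=4,                       # cap DataTree children shown in reprs
    netcdf_engine_order=("h5netcdf", "netcdf4"),  # try h5netcdf first when engine is unspecified
    use_new_combine_kwarg_defaults=True,          # opt in to the future concat/merge/open_mfdataset defaults
):
    pass  # code inside this block sees the temporary option values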
diff -pruN 2025.03.1-8/xarray/core/parallel.py 2025.10.1-1/xarray/core/parallel.py
--- 2025.03.1-8/xarray/core/parallel.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/parallel.py	2025-10-10 10:38:05.000000000 +0000
@@ -351,7 +351,9 @@ def map_blocks(
         result = func(*converted_args, **kwargs)
 
         merged_coordinates = merge(
-            [arg.coords for arg in args if isinstance(arg, Dataset | DataArray)]
+            [arg.coords for arg in args if isinstance(arg, Dataset | DataArray)],
+            join="exact",
+            compat="override",
         ).coords
 
         # check all dims are present
@@ -363,12 +365,14 @@ def map_blocks(
 
         # check that index lengths and values are as expected
         for name, index in result._indexes.items():
-            if name in expected["shapes"]:
-                if result.sizes[name] != expected["shapes"][name]:
-                    raise ValueError(
-                        f"Received dimension {name!r} of length {result.sizes[name]}. "
-                        f"Expected length {expected['shapes'][name]}."
-                    )
+            if (
+                name in expected["shapes"]
+                and result.sizes[name] != expected["shapes"][name]
+            ):
+                raise ValueError(
+                    f"Received dimension {name!r} of length {result.sizes[name]}. "
+                    f"Expected length {expected['shapes'][name]}."
+                )
 
             # ChainMap wants MutableMapping, but xindexes is Mapping
             merged_indexes = collections.ChainMap(
@@ -439,7 +443,11 @@ def map_blocks(
     # rechunk any numpy variables appropriately
     xarray_objs = tuple(arg.chunk(arg.chunksizes) for arg in xarray_objs)
 
-    merged_coordinates = merge([arg.coords for arg in aligned]).coords
+    merged_coordinates = merge(
+        [arg.coords for arg in aligned],
+        join="exact",
+        compat="override",
+    ).coords
 
     _, npargs = unzip(
         sorted(
@@ -472,7 +480,10 @@ def map_blocks(
         )
 
         coordinates = merge(
-            (preserved_coords, template.coords.to_dataset()[new_coord_vars])
+            (preserved_coords, template.coords.to_dataset()[new_coord_vars]),
+            # FIXME: this should be join="exact", but breaks a test
+            join="outer",
+            compat="override",
         ).coords
         output_chunks: Mapping[Hashable, tuple[int, ...]] = {
             dim: input_chunks[dim] for dim in template.dims if dim in input_chunks
diff -pruN 2025.03.1-8/xarray/core/resample.py 2025.10.1-1/xarray/core/resample.py
--- 2025.03.1-8/xarray/core/resample.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/resample.py	2025-10-10 10:38:05.000000000 +0000
@@ -103,7 +103,7 @@ class Resample(GroupBy[T_Xarray]):
         dask.dataframe.DataFrame.shuffle
         dask.array.shuffle
         """
-        (grouper,) = self.groupers
+        (_grouper,) = self.groupers
         return self._shuffle_obj(chunks).drop_vars(RESAMPLE_DIM)
 
     def _first_or_last(
diff -pruN 2025.03.1-8/xarray/core/resample_cftime.py 2025.10.1-1/xarray/core/resample_cftime.py
--- 2025.03.1-8/xarray/core/resample_cftime.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/resample_cftime.py	2025-10-10 10:38:05.000000000 +0000
@@ -45,6 +45,7 @@ import numpy as np
 import pandas as pd
 
 from xarray.coding.cftime_offsets import (
+    CFTIME_TICKS,
     BaseCFTimeOffset,
     MonthEnd,
     QuarterEnd,
@@ -56,6 +57,7 @@ from xarray.coding.cftime_offsets import
 )
 from xarray.coding.cftimeindex import CFTimeIndex
 from xarray.core.types import SideOptions
+from xarray.core.utils import emit_user_level_warning
 
 if typing.TYPE_CHECKING:
     from xarray.core.types import CFTimeDatetime, ResampleCompatible
@@ -84,7 +86,32 @@ class CFTimeGrouper:
         self.freq = to_offset(freq)
         self.origin = origin
 
-        if isinstance(self.freq, MonthEnd | QuarterEnd | YearEnd):
+        if not isinstance(self.freq, CFTIME_TICKS):
+            if offset is not None:
+                message = (
+                    "The 'offset' keyword does not take effect when "
+                    "resampling with a 'freq' that is not Tick-like (h, m, s, "
+                    "ms, us)"
+                )
+                emit_user_level_warning(message, category=RuntimeWarning)
+            if origin != "start_day":
+                message = (
+                    "The 'origin' keyword does not take effect when "
+                    "resampling with a 'freq' that is not Tick-like (h, m, s, "
+                    "ms, us)"
+                )
+                emit_user_level_warning(message, category=RuntimeWarning)
+
+        if isinstance(self.freq, MonthEnd | QuarterEnd | YearEnd) or self.origin in [
+            "end",
+            "end_day",
+        ]:
+            # The backward resample sets ``closed`` to ``'right'`` by default
+            # since the last value should be considered as the edge point for
+            # the last bin. When origin in "end" or "end_day", the value for a
+            # specific ``cftime.datetime`` index stands for the resample result
+            # from the current ``cftime.datetime`` minus ``freq`` to the current
+            # ``cftime.datetime`` with a right close.
             if closed is None:
                 self.closed = "right"
             else:
@@ -94,30 +121,14 @@ class CFTimeGrouper:
             else:
                 self.label = label
         else:
-            # The backward resample sets ``closed`` to ``'right'`` by default
-            # since the last value should be considered as the edge point for
-            # the last bin. When origin in "end" or "end_day", the value for a
-            # specific ``cftime.datetime`` index stands for the resample result
-            # from the current ``cftime.datetime`` minus ``freq`` to the current
-            # ``cftime.datetime`` with a right close.
-            if self.origin in ["end", "end_day"]:
-                if closed is None:
-                    self.closed = "right"
-                else:
-                    self.closed = closed
-                if label is None:
-                    self.label = "right"
-                else:
-                    self.label = label
+            if closed is None:
+                self.closed = "left"
             else:
-                if closed is None:
-                    self.closed = "left"
-                else:
-                    self.closed = closed
-                if label is None:
-                    self.label = "left"
-                else:
-                    self.label = label
+                self.closed = closed
+            if label is None:
+                self.label = "left"
+            else:
+                self.label = label
 
         if offset is not None:
             try:
@@ -475,7 +486,7 @@ def exact_cftime_datetime_difference(a:
     datetime.timedelta
     """
     seconds = b.replace(microsecond=0) - a.replace(microsecond=0)
-    seconds = int(round(seconds.total_seconds()))
+    seconds = round(seconds.total_seconds())
     microseconds = b.microsecond - a.microsecond
     return datetime.timedelta(seconds=seconds, microseconds=microseconds)
 
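
A rough sketch of the new warning behavior, assuming cftime is installed and a version with the change above: for a non-Tick frequency such as month-start, 'offset' (and a non-default 'origin') has no effect, so a RuntimeWarning is now emitted.

import numpy as np
import xarray as xr

times = xr.date_range(
    "2000-01-01", periods=12, freq="MS", calendar="noleap", use_cftime=True
)
da = xr.DataArray(np.arange(12), coords={"time": times}, dims="time")
# 'offset' is ignored for the month-start frequency, so this now warns.
da.resample(time="2MS", offset="1h").mean()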
diff -pruN 2025.03.1-8/xarray/core/treenode.py 2025.10.1-1/xarray/core/treenode.py
--- 2025.03.1-8/xarray/core/treenode.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/treenode.py	2025-10-10 10:38:05.000000000 +0000
@@ -7,7 +7,6 @@ from pathlib import PurePosixPath
 from typing import (
     TYPE_CHECKING,
     Any,
-    Generic,
     TypeVar,
 )
 
@@ -15,7 +14,7 @@ from xarray.core.types import Self
 from xarray.core.utils import Frozen, is_dict_like
 
 if TYPE_CHECKING:
-    from xarray.core.types import T_DataArray
+    from xarray.core.dataarray import DataArray
 
 
 class InvalidTreeError(Exception):
@@ -43,11 +42,12 @@ class NodePath(PurePosixPath):
             )
         # TODO should we also forbid suffixes to avoid node names with dots in them?
 
+    def absolute(self) -> Self:
+        """Convert into an absolute path."""
+        return type(self)("/", *self.parts)
 
-Tree = TypeVar("Tree", bound="TreeNode")
 
-
-class TreeNode(Generic[Tree]):
+class TreeNode:
     """
     Base class representing a node of a tree, with methods for traversing and altering the tree.
 
@@ -74,10 +74,10 @@ class TreeNode(Generic[Tree]):
 
     """
 
-    _parent: Tree | None
-    _children: dict[str, Tree]
+    _parent: Self | None
+    _children: dict[str, Self]
 
-    def __init__(self, children: Mapping[str, Tree] | None = None):
+    def __init__(self, children: Mapping[str, Self] | None = None):
         """Create a parentless node."""
         self._parent = None
         self._children = {}
@@ -87,18 +87,18 @@ class TreeNode(Generic[Tree]):
             self.children = {name: child.copy() for name, child in children.items()}
 
     @property
-    def parent(self) -> Tree | None:
+    def parent(self) -> Self | None:
         """Parent of this node."""
         return self._parent
 
     @parent.setter
-    def parent(self: Tree, new_parent: Tree) -> None:
+    def parent(self, new_parent: Self) -> None:
         raise AttributeError(
             "Cannot set parent attribute directly, you must modify the children of the other node instead using dict-like syntax"
         )
 
     def _set_parent(
-        self, new_parent: Tree | None, child_name: str | None = None
+        self, new_parent: Self | None, child_name: str | None = None
     ) -> None:
         # TODO is it possible to refactor in a way that removes this private method?
 
@@ -114,7 +114,7 @@ class TreeNode(Generic[Tree]):
             self._detach(old_parent)
             self._attach(new_parent, child_name)
 
-    def _check_loop(self, new_parent: Tree | None) -> None:
+    def _check_loop(self, new_parent: Self | None) -> None:
         """Checks that assignment of this new parent will not create a cycle."""
         if new_parent is not None:
             if new_parent is self:
@@ -127,10 +127,10 @@ class TreeNode(Generic[Tree]):
                     "Cannot set parent, as intended parent is already a descendant of this node."
                 )
 
-    def _is_descendant_of(self, node: Tree) -> bool:
+    def _is_descendant_of(self, node: Self) -> bool:
         return any(n is self for n in node.parents)
 
-    def _detach(self, parent: Tree | None) -> None:
+    def _detach(self, parent: Self | None) -> None:
         if parent is not None:
             self._pre_detach(parent)
             parents_children = parent.children
@@ -142,7 +142,7 @@ class TreeNode(Generic[Tree]):
             self._parent = None
             self._post_detach(parent)
 
-    def _attach(self, parent: Tree | None, child_name: str | None = None) -> None:
+    def _attach(self, parent: Self | None, child_name: str | None = None) -> None:
         if parent is not None:
             if child_name is None:
                 raise ValueError(
@@ -165,12 +165,12 @@ class TreeNode(Generic[Tree]):
         self._set_parent(new_parent=None)
 
     @property
-    def children(self: Tree) -> Mapping[str, Tree]:
+    def children(self) -> Mapping[str, Self]:
         """Child nodes of this node, stored under a mapping via their names."""
         return Frozen(self._children)
 
     @children.setter
-    def children(self: Tree, children: Mapping[str, Tree]) -> None:
+    def children(self, children: Mapping[str, Self]) -> None:
         self._check_children(children)
         children = {**children}
 
@@ -198,7 +198,7 @@ class TreeNode(Generic[Tree]):
         self._post_detach_children(children)
 
     @staticmethod
-    def _check_children(children: Mapping[str, Tree]) -> None:
+    def _check_children(children: Mapping[str, TreeNode]) -> None:
         """Check children for correct types and for any duplicates."""
         if not is_dict_like(children):
             raise TypeError(
@@ -224,19 +224,19 @@ class TreeNode(Generic[Tree]):
     def __repr__(self) -> str:
         return f"TreeNode(children={dict(self._children)})"
 
-    def _pre_detach_children(self: Tree, children: Mapping[str, Tree]) -> None:
+    def _pre_detach_children(self, children: Mapping[str, Self]) -> None:
         """Method call before detaching `children`."""
         pass
 
-    def _post_detach_children(self: Tree, children: Mapping[str, Tree]) -> None:
+    def _post_detach_children(self, children: Mapping[str, Self]) -> None:
         """Method call after detaching `children`."""
         pass
 
-    def _pre_attach_children(self: Tree, children: Mapping[str, Tree]) -> None:
+    def _pre_attach_children(self, children: Mapping[str, Self]) -> None:
         """Method call before attaching `children`."""
         pass
 
-    def _post_attach_children(self: Tree, children: Mapping[str, Tree]) -> None:
+    def _post_attach_children(self, children: Mapping[str, Self]) -> None:
         """Method call after attaching `children`."""
         pass
 
@@ -300,14 +300,14 @@ class TreeNode(Generic[Tree]):
     def __deepcopy__(self, memo: dict[int, Any] | None = None) -> Self:
         return self._copy_subtree(inherit=True, deep=True, memo=memo)
 
-    def _iter_parents(self: Tree) -> Iterator[Tree]:
+    def _iter_parents(self) -> Iterator[Self]:
         """Iterate up the tree, starting from the current node's parent."""
-        node: Tree | None = self.parent
+        node: Self | None = self.parent
         while node is not None:
             yield node
             node = node.parent
 
-    def iter_lineage(self: Tree) -> tuple[Tree, ...]:
+    def iter_lineage(self) -> tuple[Self, ...]:
         """Iterate up the tree, starting from the current node."""
         from warnings import warn
 
@@ -320,7 +320,7 @@ class TreeNode(Generic[Tree]):
         return (self, *self.parents)
 
     @property
-    def lineage(self: Tree) -> tuple[Tree, ...]:
+    def lineage(self) -> tuple[Self, ...]:
         """All parent nodes and their parent nodes, starting with the closest."""
         from warnings import warn
 
@@ -333,12 +333,12 @@ class TreeNode(Generic[Tree]):
         return self.iter_lineage()
 
     @property
-    def parents(self: Tree) -> tuple[Tree, ...]:
+    def parents(self) -> tuple[Self, ...]:
         """All parent nodes and their parent nodes, starting with the closest."""
         return tuple(self._iter_parents())
 
     @property
-    def ancestors(self: Tree) -> tuple[Tree, ...]:
+    def ancestors(self) -> tuple[Self, ...]:
         """All parent nodes and their parent nodes, starting with the most distant."""
 
         from warnings import warn
@@ -352,7 +352,7 @@ class TreeNode(Generic[Tree]):
         return (*reversed(self.parents), self)
 
     @property
-    def root(self: Tree) -> Tree:
+    def root(self) -> Self:
         """Root node of the tree"""
         node = self
         while node.parent is not None:
@@ -374,7 +374,7 @@ class TreeNode(Generic[Tree]):
         return self.children == {}
 
     @property
-    def leaves(self: Tree) -> tuple[Tree, ...]:
+    def leaves(self) -> tuple[Self, ...]:
         """
         All leaf nodes.
 
@@ -383,7 +383,7 @@ class TreeNode(Generic[Tree]):
         return tuple(node for node in self.subtree if node.is_leaf)
 
     @property
-    def siblings(self: Tree) -> dict[str, Tree]:
+    def siblings(self) -> dict[str, Self]:
         """
         Nodes with the same parent as this node.
         """
@@ -397,7 +397,7 @@ class TreeNode(Generic[Tree]):
             return {}
 
     @property
-    def subtree(self: Tree) -> Iterator[Tree]:
+    def subtree(self) -> Iterator[Self]:
         """
         Iterate over all nodes in this tree, including both self and all descendants.
 
@@ -417,7 +417,7 @@ class TreeNode(Generic[Tree]):
             queue.extend(node.children.values())
 
     @property
-    def subtree_with_keys(self: Tree) -> Iterator[tuple[str, Tree]]:
+    def subtree_with_keys(self) -> Iterator[tuple[str, Self]]:
         """
         Iterate over relative paths and node pairs for all nodes in this tree.
 
@@ -436,7 +436,7 @@ class TreeNode(Generic[Tree]):
             queue.extend((path / name, child) for name, child in node.children.items())
 
     @property
-    def descendants(self: Tree) -> tuple[Tree, ...]:
+    def descendants(self) -> tuple[Self, ...]:
         """
         Child nodes and all their child nodes.
 
@@ -447,11 +447,11 @@ class TreeNode(Generic[Tree]):
         DataTree.subtree
         """
         all_nodes = tuple(self.subtree)
-        this_node, *descendants = all_nodes
+        _this_node, *descendants = all_nodes
         return tuple(descendants)
 
     @property
-    def level(self: Tree) -> int:
+    def level(self) -> int:
         """
         Level of this node.
 
@@ -470,7 +470,7 @@ class TreeNode(Generic[Tree]):
         return len(self.parents)
 
     @property
-    def depth(self: Tree) -> int:
+    def depth(self) -> int:
         """
         Maximum level of this tree.
 
@@ -488,7 +488,7 @@ class TreeNode(Generic[Tree]):
         return max(node.level for node in self.root.subtree)
 
     @property
-    def width(self: Tree) -> int:
+    def width(self) -> int:
         """
         Number of nodes at this level in the tree.
 
@@ -505,23 +505,23 @@ class TreeNode(Generic[Tree]):
         """
         return len([node for node in self.root.subtree if node.level == self.level])
 
-    def _pre_detach(self: Tree, parent: Tree) -> None:
+    def _pre_detach(self, parent: Self) -> None:
         """Method call before detaching from `parent`."""
         pass
 
-    def _post_detach(self: Tree, parent: Tree) -> None:
+    def _post_detach(self, parent: Self) -> None:
         """Method call after detaching from `parent`."""
         pass
 
-    def _pre_attach(self: Tree, parent: Tree, name: str) -> None:
+    def _pre_attach(self, parent: Self, name: str) -> None:
         """Method call before attaching to `parent`."""
         pass
 
-    def _post_attach(self: Tree, parent: Tree, name: str) -> None:
+    def _post_attach(self, parent: Self, name: str) -> None:
         """Method call after attaching to `parent`."""
         pass
 
-    def get(self: Tree, key: str, default: Tree | None = None) -> Tree | None:
+    def get(self, key: str, default: Self | None = None) -> Self | None:
         """
         Return the child node with the specified key.
 
@@ -535,7 +535,7 @@ class TreeNode(Generic[Tree]):
 
     # TODO `._walk` method to be called by both `_get_item` and `_set_item`
 
-    def _get_item(self: Tree, path: str | NodePath) -> Tree | T_DataArray:
+    def _get_item(self, path: str | NodePath) -> Self | DataArray:
         """
         Returns the object lying at the given path.
 
@@ -546,7 +546,7 @@ class TreeNode(Generic[Tree]):
 
         if path.root:
             current_node = self.root
-            root, *parts = list(path.parts)
+            _root, *parts = list(path.parts)
         else:
             current_node = self
             parts = list(path.parts)
@@ -560,13 +560,13 @@ class TreeNode(Generic[Tree]):
             elif part in ("", "."):
                 pass
             else:
-                if current_node.get(part) is None:
+                child = current_node.get(part)
+                if child is None:
                     raise KeyError(f"Could not find node at {path}")
-                else:
-                    current_node = current_node.get(part)
+                current_node = child
         return current_node
 
-    def _set(self: Tree, key: str, val: Tree) -> None:
+    def _set(self, key: str, val: Any) -> None:
         """
         Set the child node with the specified key to value.
 
@@ -576,9 +576,9 @@ class TreeNode(Generic[Tree]):
         self.children = new_children
 
     def _set_item(
-        self: Tree,
+        self,
         path: str | NodePath,
-        item: Tree | T_DataArray,
+        item: Any,
         new_nodes_along_path: bool = False,
         allow_overwrite: bool = True,
     ) -> None:
@@ -614,7 +614,7 @@ class TreeNode(Generic[Tree]):
         if path.root:
             # absolute path
             current_node = self.root
-            root, *parts, name = path.parts
+            _root, *parts, name = path.parts
         else:
             # relative path
             current_node = self
@@ -631,16 +631,15 @@ class TreeNode(Generic[Tree]):
                         current_node = current_node.parent
                 elif part in ("", "."):
                     pass
+                elif part in current_node.children:
+                    current_node = current_node.children[part]
+                elif new_nodes_along_path:
+                    # Want child classes (i.e. DataTree) to populate tree with their own types
+                    new_node = type(self)()
+                    current_node._set(part, new_node)
+                    current_node = current_node.children[part]
                 else:
-                    if part in current_node.children:
-                        current_node = current_node.children[part]
-                    elif new_nodes_along_path:
-                        # Want child classes (i.e. DataTree) to populate tree with their own types
-                        new_node = type(self)()
-                        current_node._set(part, new_node)
-                        current_node = current_node.children[part]
-                    else:
-                        raise KeyError(f"Could not reach node at path {path}")
+                    raise KeyError(f"Could not reach node at path {path}")
 
         if name in current_node.children:
             # Deal with anything already existing at this location
@@ -651,7 +650,7 @@ class TreeNode(Generic[Tree]):
         else:
             current_node._set(name, item)
 
-    def __delitem__(self: Tree, key: str) -> None:
+    def __delitem__(self, key: str) -> None:
         """Remove a child node from this tree object."""
         if key in self.children:
             child = self._children[key]
@@ -660,7 +659,7 @@ class TreeNode(Generic[Tree]):
         else:
             raise KeyError(key)
 
-    def same_tree(self, other: Tree) -> bool:
+    def same_tree(self, other: Self) -> bool:
         """True if other node is in the same tree as this node."""
         return self.root is other.root
 
@@ -676,7 +675,7 @@ def _validate_name(name: str | None) ->
             raise ValueError("node names cannot contain forward slashes")
 
 
-class NamedNode(TreeNode, Generic[Tree]):
+class NamedNode(TreeNode):
     """
     A TreeNode which knows its own name.
 
@@ -684,10 +683,12 @@ class NamedNode(TreeNode, Generic[Tree])
     """
 
     _name: str | None
-    _parent: Tree | None
-    _children: dict[str, Tree]
 
-    def __init__(self, name=None, children=None):
+    def __init__(
+        self,
+        name: str | None = None,
+        children: Mapping[str, Self] | None = None,
+    ):
         super().__init__(children=children)
         _validate_name(name)
         self._name = name
@@ -737,12 +738,12 @@ class NamedNode(TreeNode, Generic[Tree])
         if self.is_root:
             return "/"
         else:
-            root, *ancestors = tuple(reversed(self.parents))
+            _root, *ancestors = tuple(reversed(self.parents))
             # don't include name of root because (a) root might not have a name & (b) we want path relative to root.
             names = [*(node.name for node in ancestors), self.name]
-            return "/" + "/".join(names)
+            return "/" + "/".join(names)  # type: ignore[arg-type]
 
-    def relative_to(self: NamedNode, other: NamedNode) -> str:
+    def relative_to(self, other: Self) -> str:
         """
         Compute the relative path from this node to node `other`.
 
@@ -754,7 +755,7 @@ class NamedNode(TreeNode, Generic[Tree])
             )
 
         this_path = NodePath(self.path)
-        if other.path in list(parent.path for parent in (self, *self.parents)):
+        if any(other.path == parent.path for parent in (self, *self.parents)):
             return str(this_path.relative_to(other.path))
         else:
             common_ancestor = self.find_common_ancestor(other)
@@ -763,7 +764,7 @@ class NamedNode(TreeNode, Generic[Tree])
                 path_to_common_ancestor / this_path.relative_to(common_ancestor.path)
             )
 
-    def find_common_ancestor(self, other: NamedNode) -> NamedNode:
+    def find_common_ancestor(self, other: Self) -> Self:
         """
         Find the first common ancestor of two nodes in the same tree.
 
@@ -781,19 +782,19 @@ class NamedNode(TreeNode, Generic[Tree])
             "Cannot find common ancestor because nodes do not lie within the same tree"
         )
 
-    def _path_to_ancestor(self, ancestor: NamedNode) -> NodePath:
+    def _path_to_ancestor(self, ancestor: Self) -> NodePath:
         """Return the relative path from this node to the given ancestor node"""
 
         if not self.same_tree(ancestor):
             raise NotFoundInTreeError(
                 "Cannot find relative path to ancestor because nodes do not lie within the same tree"
             )
-        if ancestor.path not in list(a.path for a in (self, *self.parents)):
+        if ancestor.path not in [a.path for a in (self, *self.parents)]:
             raise NotFoundInTreeError(
                 "Cannot find relative path to ancestor because given node is not an ancestor of this node"
             )
 
-        parents_paths = list(parent.path for parent in (self, *self.parents))
+        parents_paths = [parent.path for parent in (self, *self.parents)]
         generation_gap = list(parents_paths).index(ancestor.path)
         path_upwards = "../" * generation_gap if generation_gap > 0 else "."
         return NodePath(path_upwards)
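
A small sketch of the new NodePath.absolute() helper (an internal class; behavior inferred from the method body above): it prefixes the path parts with the root, so relative and absolute inputs both come out absolute.

from xarray.core.treenode import NodePath

print(NodePath("a/b/c").absolute())  # /a/b/c
print(NodePath("/x/y").absolute())   # /x/y (already absolute, unchanged)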
diff -pruN 2025.03.1-8/xarray/core/types.py 2025.10.1-1/xarray/core/types.py
--- 2025.03.1-8/xarray/core/types.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/types.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,7 +1,6 @@
 from __future__ import annotations
 
 import datetime
-import sys
 from collections.abc import Callable, Collection, Hashable, Iterator, Mapping, Sequence
 from types import EllipsisType
 from typing import (
@@ -9,7 +8,9 @@ from typing import (
     Any,
     Literal,
     Protocol,
+    Self,
     SupportsIndex,
+    TypeAlias,
     TypeVar,
     Union,
     overload,
@@ -18,21 +19,6 @@ from typing import (
 
 import numpy as np
 import pandas as pd
-
-try:
-    if sys.version_info >= (3, 11):
-        from typing import Self, TypeAlias
-    else:
-        from typing import TypeAlias
-
-        from typing_extensions import Self
-except ImportError:
-    if TYPE_CHECKING:
-        raise
-    else:
-        Self: Any = None
-
-
 from numpy._typing import _SupportsDType
 from numpy.typing import ArrayLike
 
@@ -46,7 +32,7 @@ if TYPE_CHECKING:
     from xarray.core.indexes import Index, Indexes
     from xarray.core.utils import Frozen
     from xarray.core.variable import IndexVariable, Variable
-    from xarray.groupers import Grouper, TimeResampler
+    from xarray.groupers import Grouper, Resampler
     from xarray.structure.alignment import Aligner
 
     GroupInput: TypeAlias = (
@@ -195,7 +181,7 @@ T_DataWithCoords = TypeVar("T_DataWithCo
 
 # Temporary placeholder for indicating an array api compliant type.
 # hopefully in the future we can narrow this down more:
-T_DuckArray = TypeVar("T_DuckArray", bound=Any, covariant=True)
+T_DuckArray = TypeVar("T_DuckArray", bound=Any, covariant=True)  # noqa: PLC0105
 
 # For typing pandas extension arrays.
 T_ExtensionArray = TypeVar("T_ExtensionArray", bound=pd.api.extensions.ExtensionArray)
@@ -214,8 +200,8 @@ Dims = Union[str, Collection[Hashable],
 
 # FYI in some cases we don't allow `None`, which this doesn't take account of.
 # FYI the `str` is for a size string, e.g. "16MB", supported by dask.
-T_ChunkDim: TypeAlias = str | int | Literal["auto"] | None | tuple[int, ...]
-T_ChunkDimFreq: TypeAlias = Union["TimeResampler", T_ChunkDim]
+T_ChunkDim: TypeAlias = str | int | Literal["auto"] | tuple[int, ...] | None  # noqa: PYI051
+T_ChunkDimFreq: TypeAlias = Union["Resampler", T_ChunkDim]
 T_ChunksFreq: TypeAlias = T_ChunkDim | Mapping[Any, T_ChunkDimFreq]
 # We allow the tuple form of this (though arguably we could transition to named dims only)
 T_Chunks: TypeAlias = T_ChunkDim | Mapping[Any, T_ChunkDim]
@@ -253,16 +239,16 @@ InterpolantOptions = Literal[
 InterpnOptions = Literal["linear", "nearest", "slinear", "cubic", "quintic", "pchip"]
 InterpOptions = Union[Interp1dOptions, InterpolantOptions, InterpnOptions]
 
-DatetimeUnitOptions = Literal[
-    "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "μs", "ns", "ps", "fs", "as", None
-]
+DatetimeUnitOptions = (
+    Literal["W", "D", "h", "m", "s", "ms", "us", "μs", "ns", "ps", "fs", "as"] | None
+)
 NPDatetimeUnitOptions = Literal["D", "h", "m", "s", "ms", "us", "ns"]
 PDDatetimeUnitOptions = Literal["s", "ms", "us", "ns"]
 
-QueryEngineOptions = Literal["python", "numexpr", None]
+QueryEngineOptions = Literal["python", "numexpr"] | None
 QueryParserOptions = Literal["pandas", "python"]
 
-ReindexMethodOptions = Literal["nearest", "pad", "ffill", "backfill", "bfill", None]
+ReindexMethodOptions = Literal["nearest", "pad", "ffill", "backfill", "bfill"] | None
 
 PadModeOptions = Literal[
     "constant",
@@ -281,7 +267,7 @@ T_VarPadConstantValues = T_PadConstantVa
 T_DatasetPadConstantValues = (
     T_VarPadConstantValues | Mapping[Any, T_VarPadConstantValues]
 )
-PadReflectOptions = Literal["even", "odd", None]
+PadReflectOptions = Literal["even", "odd"] | None
 
 CFCalendar = Literal[
     "standard",
@@ -299,10 +285,10 @@ CoarsenBoundaryOptions = Literal["exact"
 SideOptions = Literal["left", "right"]
 InclusiveOptions = Literal["both", "neither", "left", "right"]
 
-ScaleOptions = Literal["linear", "symlog", "log", "logit", None]
-HueStyleOptions = Literal["continuous", "discrete", None]
+ScaleOptions = Literal["linear", "symlog", "log", "logit"] | None
+HueStyleOptions = Literal["continuous", "discrete"] | None
 AspectOptions = Union[Literal["auto", "equal"], float, None]
-ExtendOptions = Literal["neither", "both", "min", "max", None]
+ExtendOptions = Literal["neither", "both", "min", "max"] | None
 
 
 _T_co = TypeVar("_T_co", covariant=True)
@@ -318,6 +304,10 @@ class NestedSequence(Protocol[_T_co]):
     def __reversed__(self, /) -> Iterator[_T_co | NestedSequence[_T_co]]: ...
 
 
+_T = TypeVar("_T")
+NestedDict = dict[str, "NestedDict[_T] | _T"]
+
+
 AnyStr_co = TypeVar("AnyStr_co", str, bytes, covariant=True)
 
 
@@ -329,7 +319,7 @@ class BaseBuffer(Protocol):
         # for _get_filepath_or_buffer
         ...
 
-    def seek(self, __offset: int, __whence: int = ...) -> int:
+    def seek(self, offset: int, whence: int = ..., /) -> int:
         # with one argument: gzip.GzipFile, bz2.BZ2File
         # with two arguments: zip.ZipFile, read_sas
         ...
@@ -345,7 +335,7 @@ class BaseBuffer(Protocol):
 
 @runtime_checkable
 class ReadBuffer(BaseBuffer, Protocol[AnyStr_co]):
-    def read(self, __n: int = ...) -> AnyStr_co:
+    def read(self, n: int = ..., /) -> AnyStr_co:
         # for BytesIOWrapper, gzip.GzipFile, bz2.BZ2File
         ...
 
@@ -378,3 +368,14 @@ Bins = Union[
 ]
 
 ResampleCompatible: TypeAlias = str | datetime.timedelta | pd.Timedelta | pd.DateOffset
+
+
+class Closable(Protocol):
+    def close(self) -> None: ...
+
+
+class Lock(Protocol):
+    def acquire(self, *args, **kwargs) -> Any: ...
+    def release(self) -> None: ...
+    def __enter__(self) -> Any: ...
+    def __exit__(self, *args, **kwargs) -> None: ...
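
A sketch of how the new structural protocols can be used for annotations. The Lock protocol is not runtime_checkable, so it is matched by shape at type-checking time; e.g. threading.Lock satisfies it without inheriting from it.

import threading

from xarray.core.types import Lock


def run_locked(lock: Lock) -> None:
    # Any object with acquire/release and context-manager support type-checks here.
    with lock:
        pass


run_locked(threading.Lock())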
diff -pruN 2025.03.1-8/xarray/core/utils.py 2025.10.1-1/xarray/core/utils.py
--- 2025.03.1-8/xarray/core/utils.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/utils.py	2025-10-10 10:38:05.000000000 +0000
@@ -61,13 +61,24 @@ from collections.abc import (
     MutableMapping,
     MutableSet,
     Sequence,
-    Set,
     ValuesView,
 )
+from collections.abc import (
+    Set as AbstractSet,
+)
 from enum import Enum
 from pathlib import Path
 from types import EllipsisType, ModuleType
-from typing import TYPE_CHECKING, Any, Generic, Literal, TypeGuard, TypeVar, overload
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Generic,
+    Literal,
+    TypeGuard,
+    TypeVar,
+    cast,
+    overload,
+)
 
 import numpy as np
 import pandas as pd
@@ -86,13 +97,27 @@ from xarray.namedarray.utils import (  #
 )
 
 if TYPE_CHECKING:
-    from xarray.core.types import Dims, ErrorOptionsWithWarn
+    from xarray.core.types import Dims, ErrorOptionsWithWarn, NestedDict
 
 K = TypeVar("K")
 V = TypeVar("V")
 T = TypeVar("T")
 
 
+def is_allowed_extension_array_dtype(dtype: Any):
+    return pd.api.types.is_extension_array_dtype(dtype) and not isinstance(  # noqa: TID251
+        dtype, pd.StringDtype
+    )
+
+
+def is_allowed_extension_array(array: Any) -> bool:
+    return (
+        hasattr(array, "dtype")
+        and is_allowed_extension_array_dtype(array.dtype)
+        and not isinstance(array, pd.arrays.NumpyExtensionArray)  # type: ignore[attr-defined]
+    )
+
+
 def alias_message(old_name: str, new_name: str) -> str:
     return f"{old_name} has been deprecated. Use {new_name} instead."
 
@@ -214,23 +239,40 @@ def equivalent(first: T, second: T) -> b
     """Compare two objects for equivalence (identity or equality), using
     array_equiv if either object is an ndarray. If both objects are lists,
     equivalent is sequentially called on all the elements.
+
+    Returns False for any comparison that doesn't return a boolean,
+    making this function safer to use with objects that have non-standard
+    __eq__ implementations.
     """
     # TODO: refactor to avoid circular import
     from xarray.core import duck_array_ops
 
     if first is second:
         return True
+
     if isinstance(first, np.ndarray) or isinstance(second, np.ndarray):
         return duck_array_ops.array_equiv(first, second)
+
     if isinstance(first, list) or isinstance(second, list):
         return list_equiv(first, second)  # type: ignore[arg-type]
-    return (first == second) or (pd.isnull(first) and pd.isnull(second))  # type: ignore[call-overload]
+
+    # For non-array/list types, use == but require boolean result
+    result = first == second
+    if not isinstance(result, bool):
+        # Accept numpy bool scalars as well
+        if isinstance(result, np.bool_):
+            return bool(result)
+        # Reject any other non-boolean type (Dataset, Series, custom objects, etc.)
+        return False
+
+    # Check for NaN equivalence
+    return result or (pd.isnull(first) and pd.isnull(second))  # type: ignore[call-overload]
 
 
 def list_equiv(first: Sequence[T], second: Sequence[T]) -> bool:
     if len(first) != len(second):
         return False
-    return all(equivalent(f, s) for f, s in zip(first, second, strict=True))
+    return all(itertools.starmap(equivalent, zip(first, second, strict=True)))
 
 
 def peek_at(iterable: Iterable[T]) -> tuple[T, Iterator[T]]:
@@ -293,6 +335,25 @@ def remove_incompatible_items(
             del first_dict[k]
 
 
+def flat_items(
+    nested: Mapping[str, NestedDict[T] | T],
+    prefix: str | None = None,
+    separator: str = "/",
+) -> Iterable[tuple[str, T]]:
+    """Yields flat items from a nested dictionary of dicts.
+
+    Notes:
+    - Only dict subclasses are flattened.
+    - Duplicate items are not removed. These should be checked separately.
+    """
+    for key, value in nested.items():
+        key = prefix + separator + key if prefix is not None else key
+        if isinstance(value, dict):
+            yield from flat_items(value, key, separator)
+        else:
+            yield key, value
+
+
 def is_full_slice(value: Any) -> bool:
     return isinstance(value, slice) and value == slice(None)
 
@@ -669,15 +730,12 @@ def is_remote_uri(path: str) -> bool:
 
 def read_magic_number_from_file(filename_or_obj, count=8) -> bytes:
     # check byte header to determine file type
-    if isinstance(filename_or_obj, bytes):
-        magic_number = filename_or_obj[:count]
-    elif isinstance(filename_or_obj, io.IOBase):
-        if filename_or_obj.tell() != 0:
-            filename_or_obj.seek(0)
-        magic_number = filename_or_obj.read(count)
-        filename_or_obj.seek(0)
-    else:
+    if not isinstance(filename_or_obj, io.IOBase):
         raise TypeError(f"cannot read the magic number from {type(filename_or_obj)}")
+    if filename_or_obj.tell() != 0:
+        filename_or_obj.seek(0)
+    magic_number = filename_or_obj.read(count)
+    filename_or_obj.seek(0)
     return magic_number
 
 
@@ -695,10 +753,8 @@ def try_read_magic_number_from_path(path
 def try_read_magic_number_from_file_or_path(filename_or_obj, count=8) -> bytes | None:
     magic_number = try_read_magic_number_from_path(filename_or_obj, count)
     if magic_number is None:
-        try:
+        with contextlib.suppress(TypeError):
             magic_number = read_magic_number_from_file(filename_or_obj, count)
-        except TypeError:
-            pass
     return magic_number
 
 
@@ -751,7 +807,7 @@ def decode_numpy_dict_values(attrs: Mapp
     attrs = dict(attrs)
     for k, v in attrs.items():
         if isinstance(v, np.ndarray):
-            attrs[k] = v.tolist()
+            attrs[k] = cast(V, v.tolist())
         elif isinstance(v, np.generic):
             attrs[k] = v.item()
     return attrs
@@ -888,7 +944,7 @@ def parse_dims_as_tuple(
     *,
     check_exists: bool = True,
     replace_none: Literal[False],
-) -> tuple[Hashable, ...] | None | EllipsisType: ...
+) -> tuple[Hashable, ...] | EllipsisType | None: ...
 
 
 def parse_dims_as_tuple(
@@ -897,7 +953,7 @@ def parse_dims_as_tuple(
     *,
     check_exists: bool = True,
     replace_none: bool = True,
-) -> tuple[Hashable, ...] | None | EllipsisType:
+) -> tuple[Hashable, ...] | EllipsisType | None:
     """Parse one or more dimensions.
 
     A single dimension must be always a str, multiple dimensions
@@ -949,7 +1005,7 @@ def parse_dims_as_set(
     *,
     check_exists: bool = True,
     replace_none: Literal[False],
-) -> set[Hashable] | None | EllipsisType: ...
+) -> set[Hashable] | EllipsisType | None: ...
 
 
 def parse_dims_as_set(
@@ -958,7 +1014,7 @@ def parse_dims_as_set(
     *,
     check_exists: bool = True,
     replace_none: bool = True,
-) -> set[Hashable] | None | EllipsisType:
+) -> set[Hashable] | EllipsisType | None:
     """Like parse_dims_as_tuple, but returning a set instead of a tuple."""
     # TODO: Consider removing parse_dims_as_tuple?
     if dim is None or dim is ...:
@@ -990,7 +1046,7 @@ def parse_ordered_dims(
     *,
     check_exists: bool = True,
     replace_none: Literal[False],
-) -> tuple[Hashable, ...] | None | EllipsisType: ...
+) -> tuple[Hashable, ...] | EllipsisType | None: ...
 
 
 def parse_ordered_dims(
@@ -999,7 +1055,7 @@ def parse_ordered_dims(
     *,
     check_exists: bool = True,
     replace_none: bool = True,
-) -> tuple[Hashable, ...] | None | EllipsisType:
+) -> tuple[Hashable, ...] | EllipsisType | None:
     """Parse one or more dimensions.
 
     A single dimension must be always a str, multiple dimensions
@@ -1048,7 +1104,7 @@ def parse_ordered_dims(
         )
 
 
-def _check_dims(dim: Set[Hashable], all_dims: Set[Hashable]) -> None:
+def _check_dims(dim: AbstractSet[Hashable], all_dims: AbstractSet[Hashable]) -> None:
     wrong_dims = (dim - all_dims) - {...}
     if wrong_dims:
         wrong_dims_str = ", ".join(f"'{d}'" for d in wrong_dims)
@@ -1077,7 +1133,7 @@ class UncachedAccessor(Generic[_Accessor
     @overload
     def __get__(self, obj: object, cls) -> _Accessor: ...
 
-    def __get__(self, obj: None | object, cls) -> type[_Accessor] | _Accessor:
+    def __get__(self, obj: object | None, cls) -> type[_Accessor] | _Accessor:
         if obj is None:
             return self._accessor
 
@@ -1278,12 +1334,12 @@ def attempt_import(module: str) -> Modul
         matplotlib="for plotting",
         hypothesis="for the `xarray.testing.strategies` submodule",
     )
-    package_name = module.split(".")[0]  # e.g. "zarr" from "zarr.storage"
+    package_name = module.split(".", maxsplit=1)[0]  # e.g. "zarr" from "zarr.storage"
     install_name = install_mapping.get(package_name, package_name)
     reason = package_purpose.get(package_name, "")
     try:
         return importlib.import_module(module)
-    except (ImportError, ModuleNotFoundError) as e:
+    except ImportError as e:
         raise ImportError(
             f"The {install_name} package is required {reason}"
             " but could not be imported."
diff -pruN 2025.03.1-8/xarray/core/variable.py 2025.10.1-1/xarray/core/variable.py
--- 2025.03.1-8/xarray/core/variable.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/core/variable.py	2025-10-10 10:38:05.000000000 +0000
@@ -13,7 +13,7 @@ from typing import TYPE_CHECKING, Any, N
 import numpy as np
 import pandas as pd
 from numpy.typing import ArrayLike
-from pandas.api.types import is_extension_array_dtype
+from packaging.version import Version
 
 import xarray as xr  # only for Dataset and DataArray
 from xarray.compat.array_api_compat import to_like_array
@@ -24,6 +24,7 @@ from xarray.core.common import AbstractA
 from xarray.core.extension_array import PandasExtensionArray
 from xarray.core.indexing import (
     BasicIndexer,
+    CoordinateTransformIndexingAdapter,
     OuterIndexer,
     PandasIndexingAdapter,
     VectorizedIndexer,
@@ -40,6 +41,7 @@ from xarray.core.utils import (
     emit_user_level_warning,
     ensure_us_time_resolution,
     infix_dims,
+    is_allowed_extension_array,
     is_dict_like,
     is_duck_array,
     is_duck_dask_array,
@@ -48,6 +50,7 @@ from xarray.core.utils import (
 from xarray.namedarray.core import NamedArray, _raise_if_any_duplicate_dimensions
 from xarray.namedarray.parallelcompat import get_chunked_array_type
 from xarray.namedarray.pycompat import (
+    async_to_duck_array,
     integer_types,
     is_0d_dask_array,
     is_chunked_array,
@@ -60,9 +63,15 @@ NON_NUMPY_SUPPORTED_ARRAY_TYPES = (
     indexing.ExplicitlyIndexed,
     pd.Index,
     pd.api.extensions.ExtensionArray,
+    PandasExtensionArray,
 )
 # https://github.com/python/mypy/issues/224
 BASIC_INDEXING_TYPES = integer_types + (slice,)
+UNSUPPORTED_EXTENSION_ARRAY_TYPES = (
+    pd.arrays.DatetimeArray,
+    pd.arrays.TimedeltaArray,
+    pd.arrays.NumpyExtensionArray,  # type: ignore[attr-defined]
+)
 
 if TYPE_CHECKING:
     from xarray.core.types import (
@@ -168,15 +177,14 @@ def as_variable(
             f"explicit list of dimensions: {obj!r}"
         )
 
-    if auto_convert:
-        if name is not None and name in obj.dims and obj.ndim == 1:
-            # automatically convert the Variable into an Index
-            emit_user_level_warning(
-                f"variable {name!r} with name matching its dimension will not be "
-                "automatically converted into an `IndexVariable` object in the future.",
-                FutureWarning,
-            )
-            obj = obj.to_index_variable()
+    if auto_convert and name is not None and name in obj.dims and obj.ndim == 1:
+        # automatically convert the Variable into an Index
+        emit_user_level_warning(
+            f"variable {name!r} with name matching its dimension will not be "
+            "automatically converted into an `IndexVariable` object in the future.",
+            FutureWarning,
+        )
+        obj = obj.to_index_variable()
 
     return obj
 
@@ -191,14 +199,21 @@ def _maybe_wrap_data(data):
     """
     if isinstance(data, pd.Index):
         return PandasIndexingAdapter(data)
-    if isinstance(data, pd.api.extensions.ExtensionArray):
-        return PandasExtensionArray[type(data)](data)
+    if isinstance(data, UNSUPPORTED_EXTENSION_ARRAY_TYPES):
+        return data.to_numpy()
+    if isinstance(
+        data, pd.api.extensions.ExtensionArray
+    ) and is_allowed_extension_array(data):
+        return PandasExtensionArray(data)
     return data
 
 
 def _possibly_convert_objects(values):
     """Convert object arrays into datetime64 and timedelta64 according
-    to the pandas convention.
+    to the pandas convention.  For backwards compatibility: as of pandas 3.0.0,
+    object-dtype inputs are cast to strings by `pandas.Series`,
+    but we return them as object dtype with the input dtype metadata preserved.
+
 
     * datetime.datetime
     * datetime.timedelta
@@ -213,6 +228,17 @@ def _possibly_convert_objects(values):
             result.flags.writeable = True
         except ValueError:
             result = result.copy()
+    # For why we need this behavior: https://github.com/pandas-dev/pandas/issues/61938
+    # Object-dtype inputs containing strings are converted to string dtype by
+    # `pandas.Series` and, as of pandas 3.0.0, lose their `dtype.metadata`.
+    # If the round trip back to numpy in this function yields an object array
+    # again, restore the original dtype so its metadata is preserved.
+    if (
+        result.dtype.kind == "O"
+        and values.dtype.kind == "O"
+        and Version(pd.__version__) >= Version("3.0.0dev0")
+    ):
+        result.dtype = values.dtype
     return result
 
 
@@ -252,7 +278,15 @@ def as_compatible_data(
 
     # we don't want nested self-described arrays
     if isinstance(data, pd.Series | pd.DataFrame):
-        pandas_data = data.values
+        if (
+            isinstance(data, pd.Series)
+            and is_allowed_extension_array(data.array)
+            # Datetime-like and numpy-backed extension arrays are likewise not allowed as backing Variable types
+            and not isinstance(data.array, UNSUPPORTED_EXTENSION_ARRAY_TYPES)
+        ):
+            pandas_data = data.array
+        else:
+            pandas_data = data.values  # type: ignore[assignment]
         if isinstance(pandas_data, NON_NUMPY_SUPPORTED_ARRAY_TYPES):
             return convert_non_numpy_type(pandas_data)
         else:
@@ -261,7 +295,7 @@ def as_compatible_data(
     if isinstance(data, np.ma.MaskedArray):
         mask = np.ma.getmaskarray(data)
         if mask.any():
-            dtype, fill_value = dtypes.maybe_promote(data.dtype)
+            _dtype, fill_value = dtypes.maybe_promote(data.dtype)
             data = duck_array_ops.where_method(data, ~mask, fill_value)
         else:
             data = np.asarray(data)
@@ -366,7 +400,7 @@ class Variable(NamedArray, AbstractArray
             dims=dims, data=as_compatible_data(data, fastpath=fastpath), attrs=attrs
         )
 
-        self._encoding = None
+        self._encoding: dict[Any, Any] | None = None
         if encoding is not None:
             self.encoding = encoding
 
@@ -390,9 +424,15 @@ class Variable(NamedArray, AbstractArray
             return cls_(dims_, data, attrs_)
 
     @property
-    def _in_memory(self):
+    def _in_memory(self) -> bool:
+        if isinstance(
+            self._data, PandasIndexingAdapter | CoordinateTransformIndexingAdapter
+        ):
+            return self._data._in_memory
+
         return isinstance(
-            self._data, np.ndarray | np.number | PandasIndexingAdapter
+            self._data,
+            np.ndarray | np.number | PandasExtensionArray,
         ) or (
             isinstance(self._data, indexing.MemoryCachedArray)
             and isinstance(self._data.array, indexing.NumpyIndexingAdapter)
@@ -410,14 +450,22 @@ class Variable(NamedArray, AbstractArray
         Variable.as_numpy
         Variable.values
         """
-        if is_duck_array(self._data):
-            return self._data
+        if isinstance(self._data, PandasExtensionArray):
+            duck_array = self._data.array
         elif isinstance(self._data, indexing.ExplicitlyIndexed):
-            return self._data.get_duck_array()
-        else:
-            return self.values
+            duck_array = self._data.get_duck_array()
+        elif is_duck_array(self._data):
+            duck_array = self._data
+        else:
+            duck_array = self.values
+        if isinstance(duck_array, PandasExtensionArray):
+            # even though PandasExtensionArray is a duck array,
+            # we should not return the PandasExtensionArray wrapper,
+            # and instead return the underlying data.
+            return duck_array.array
+        return duck_array
 
-    @data.setter
+    @data.setter  # type: ignore[override,unused-ignore]
     def data(self, data: T_DuckArray | ArrayLike) -> None:
         data = as_compatible_data(data)
         self._check_shape(data)
@@ -599,7 +647,10 @@ class Variable(NamedArray, AbstractArray
             k.item() if isinstance(k, np.ndarray) and k.ndim == 0 else k for k in key
         )
 
-        if all(isinstance(k, BASIC_INDEXING_TYPES) for k in key):
+        if all(
+            (isinstance(k, BASIC_INDEXING_TYPES) and not isinstance(k, bool))
+            for k in key
+        ):
             return self._broadcast_indexes_basic(key)
 
         self._validate_indexers(key)
@@ -855,7 +906,8 @@ class Variable(NamedArray, AbstractArray
     def encoding(self) -> dict[Any, Any]:
         """Dictionary of encodings on this variable."""
         if self._encoding is None:
-            self._encoding = {}
+            encoding: dict[Any, Any] = {}
+            self._encoding = encoding
         return self._encoding
 
     @encoding.setter
@@ -897,9 +949,9 @@ class Variable(NamedArray, AbstractArray
 
         else:
             ndata = as_compatible_data(data)
-            if self.shape != ndata.shape:  # type: ignore[attr-defined]
+            if self.shape != ndata.shape:
                 raise ValueError(
-                    f"Data shape {ndata.shape} must match shape of object {self.shape}"  # type: ignore[attr-defined]
+                    f"Data shape {ndata.shape} must match shape of object {self.shape}"
                 )
 
         attrs = copy.deepcopy(self._attrs, memo) if deep else copy.copy(self._attrs)
@@ -923,14 +975,16 @@ class Variable(NamedArray, AbstractArray
             data = copy.copy(self.data)
         if attrs is _default:
             attrs = copy.copy(self._attrs)
-
         if encoding is _default:
             encoding = copy.copy(self._encoding)
         return type(self)(dims, data, attrs, encoding, fastpath=True)
 
-    def load(self, **kwargs):
-        """Manually trigger loading of this variable's data from disk or a
-        remote source into memory and return this variable.
+    def load(self, **kwargs) -> Self:
+        """Trigger loading data into memory and return this variable.
+
+        Data will be computed and/or loaded from disk or a remote source.
+
+        Unlike ``.compute``, the original variable is modified and returned.
 
         Normally, it should not be necessary to call this method in user code,
         because all xarray functions should either work on deferred data or
@@ -941,17 +995,61 @@ class Variable(NamedArray, AbstractArray
         **kwargs : dict
             Additional keyword arguments passed on to ``dask.array.compute``.
 
+        Returns
+        -------
+        object : Variable
+            Same object but with lazy data as an in-memory array.
+
         See Also
         --------
         dask.array.compute
+        Variable.compute
+        Variable.load_async
+        DataArray.load
+        Dataset.load
         """
         self._data = to_duck_array(self._data, **kwargs)
         return self
 
-    def compute(self, **kwargs):
-        """Manually trigger loading of this variable's data from disk or a
-        remote source into memory and return a new variable. The original is
-        left unaltered.
+    async def load_async(self, **kwargs) -> Self:
+        """Trigger and await asynchronous loading of data into memory and return this variable.
+
+        Data will be computed and/or loaded from disk or a remote source.
+
+        Unlike ``.compute``, the original variable is modified and returned.
+
+        Only works when opening data lazily from IO storage backends which support lazy asynchronous loading;
+        otherwise a NotImplementedError is raised.
+
+        Note that users are expected to limit concurrency themselves: xarray does not limit concurrency internally in any way.
+
+        Parameters
+        ----------
+        **kwargs : dict
+            Additional keyword arguments passed on to ``dask.array.compute``.
+
+        Returns
+        -------
+        object : Variable
+            Same object but with lazy data as an in-memory array.
+
+        See Also
+        --------
+        dask.array.compute
+        Variable.load
+        Variable.compute
+        DataArray.load_async
+        Dataset.load_async
+        """
+        self._data = await async_to_duck_array(self._data, **kwargs)
+        return self
+
+    def compute(self, **kwargs) -> Self:
+        """Trigger loading data into memory and return a new variable.
+
+        Data will be computed and/or loaded from disk or a remote source.
+
+        The original variable is left unaltered.
 
         Normally, it should not be necessary to call this method in user code,
         because all xarray functions should either work on deferred data or
@@ -962,9 +1060,18 @@ class Variable(NamedArray, AbstractArray
         **kwargs : dict
             Additional keyword arguments passed on to ``dask.array.compute``.
 
+        Returns
+        -------
+        object : Variable
+            New object with the data as an in-memory array.
+
         See Also
         --------
         dask.array.compute
+        Variable.load
+        Variable.load_async
+        DataArray.compute
+        Dataset.compute
         """
         new = self.copy(deep=False)
         return new.load(**kwargs)
@@ -1043,7 +1150,7 @@ class Variable(NamedArray, AbstractArray
         numpy.squeeze
         """
         dims = common.get_squeeze_dims(self, dim)
-        return self.isel({d: 0 for d in dims})
+        return self.isel(dict.fromkeys(dims, 0))
 
     def _shift_one_dim(self, dim, count, fill_value=dtypes.NA):
         axis = self.get_axis_num(dim)
@@ -1347,7 +1454,7 @@ class Variable(NamedArray, AbstractArray
             dim = [dim]
 
         if shape is None and is_dict_like(dim):
-            shape = dim.values()
+            shape = tuple(dim.values())
 
         missing_dims = set(self.dims) - set(dim)
         if missing_dims:
@@ -1363,13 +1470,18 @@ class Variable(NamedArray, AbstractArray
             # don't use broadcast_to unless necessary so the result remains
             # writeable if possible
             expanded_data = self.data
-        elif shape is not None:
-            dims_map = dict(zip(dim, shape, strict=True))
-            tmp_shape = tuple(dims_map[d] for d in expanded_dims)
-            expanded_data = duck_array_ops.broadcast_to(self.data, tmp_shape)
-        else:
+        elif shape is None or all(
+            s == 1 for s, e in zip(shape, dim, strict=True) if e not in self_dims
+        ):
+            # "Trivial" broadcasting, i.e. simply inserting a new dimension
+            # This is typically easier for duck arrays to implement
+            # than the full "broadcast_to" semantics
             indexer = (None,) * (len(expanded_dims) - self.ndim) + (...,)
             expanded_data = self.data[indexer]
+        else:  # shape is not None and requires a genuine broadcast
+            dims_map = dict(zip(dim, shape, strict=True))
+            tmp_shape = tuple(dims_map[d] for d in expanded_dims)
+            expanded_data = duck_array_ops.broadcast_to(self._data, tmp_shape)
 
         expanded_var = Variable(
             expanded_dims, expanded_data, self._attrs, self._encoding, fastpath=True
@@ -2152,10 +2264,10 @@ class Variable(NamedArray, AbstractArray
         Construct a reshaped-array for coarsen
         """
         if not is_dict_like(boundary):
-            boundary = {d: boundary for d in windows.keys()}
+            boundary = dict.fromkeys(windows.keys(), boundary)
 
         if not is_dict_like(side):
-            side = {d: side for d in windows.keys()}
+            side = dict.fromkeys(windows.keys(), side)
 
         # remove unrelated dimensions
         boundary = {k: v for k, v in boundary.items() if k in windows}
@@ -2205,8 +2317,7 @@ class Variable(NamedArray, AbstractArray
         for i, d in enumerate(variable.dims):
             if d in windows:
                 size = variable.shape[i]
-                shape.append(int(size / windows[d]))
-                shape.append(windows[d])
+                shape.extend((int(size / windows[d]), windows[d]))
                 axis_count += 1
                 axes.append(i + axis_count)
             else:
@@ -2304,7 +2415,7 @@ class Variable(NamedArray, AbstractArray
         """
         return self._new(data=self.data.real)
 
-    def __array_wrap__(self, obj, context=None):
+    def __array_wrap__(self, obj, context=None, return_scalar=False):
         return Variable(self.dims, obj)
 
     def _unary_op(self, f, *args, **kwargs):
@@ -2593,11 +2704,6 @@ class Variable(NamedArray, AbstractArray
         dask.array.from_array
         """
 
-        if is_extension_array_dtype(self):
-            raise ValueError(
-                f"{self} was found to be a Pandas ExtensionArray.  Please convert to numpy first."
-            )
-
         if from_array_kwargs is None:
             from_array_kwargs = {}
 
@@ -2654,6 +2760,10 @@ class IndexVariable(Variable):
         # data is already loaded into memory for IndexVariable
         return self
 
+    async def load_async(self):
+        # data is already loaded into memory for IndexVariable
+        return self
+
     # https://github.com/python/mypy/issues/1465
     @Variable.data.setter  # type: ignore[attr-defined]
     def data(self, data):
@@ -2783,9 +2893,9 @@ class IndexVariable(Variable):
 
         else:
             ndata = as_compatible_data(data)
-            if self.shape != ndata.shape:  # type: ignore[attr-defined]
+            if self.shape != ndata.shape:
                 raise ValueError(
-                    f"Data shape {ndata.shape} must match shape of object {self.shape}"  # type: ignore[attr-defined]
+                    f"Data shape {ndata.shape} must match shape of object {self.shape}"
                 )
 
         attrs = copy.deepcopy(self._attrs) if deep else copy.copy(self._attrs)
@@ -2844,13 +2954,13 @@ class IndexVariable(Variable):
             return index
 
     @property
-    def level_names(self) -> list[str] | None:
+    def level_names(self) -> list[Hashable | None] | None:
         """Return MultiIndex level names or None if this IndexVariable has no
         MultiIndex.
         """
         index = self.to_index()
         if isinstance(index, pd.MultiIndex):
-            return index.names
+            return list(index.names)
         else:
             return None
 
@@ -2921,15 +3031,15 @@ def broadcast_variables(*variables: Vari
 
 
 def _broadcast_compat_data(self, other):
-    if not OPTIONS["arithmetic_broadcast"]:
-        if (isinstance(other, Variable) and self.dims != other.dims) or (
-            is_duck_array(other) and self.ndim != other.ndim
-        ):
-            raise ValueError(
-                "Broadcasting is necessary but automatic broadcasting is disabled via "
-                "global option `'arithmetic_broadcast'`. "
-                "Use `xr.set_options(arithmetic_broadcast=True)` to enable automatic broadcasting."
-            )
+    if not OPTIONS["arithmetic_broadcast"] and (
+        (isinstance(other, Variable) and self.dims != other.dims)
+        or (is_duck_array(other) and self.ndim != other.ndim)
+    ):
+        raise ValueError(
+            "Broadcasting is necessary but automatic broadcasting is disabled via "
+            "global option `'arithmetic_broadcast'`. "
+            "Use `xr.set_options(arithmetic_broadcast=True)` to enable automatic broadcasting."
+        )
 
     if all(hasattr(other, attr) for attr in ["dims", "data", "shape", "encoding"]):
         # `other` satisfies the necessary Variable API for broadcast_variables
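For orientation, here is a minimal sketch of how the ``load``/``compute``/``load_async`` methods documented above differ in mutation semantics, using an in-memory numpy-backed ``Variable``. The asynchronous call is shown only as a commented pattern, since per its docstring it is intended for lazily loaded data and backends without async support raise ``NotImplementedError``.

import numpy as np
import xarray as xr

var = xr.Variable(("x",), np.arange(4))

new = var.compute()  # returns a new Variable; the original is left unaltered
same = var.load()    # loads in place and returns the same object
assert same is var
assert new is not var

# Pattern for the new asynchronous path (lazy, async-capable backends only):
#
#     loaded = await var.load_async()
#
# Per the docstring above, xarray does not limit concurrency here; callers are
# expected to bound the number of concurrent load_async() calls themselves.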
diff -pruN 2025.03.1-8/xarray/groupers.py 2025.10.1-1/xarray/groupers.py
--- 2025.03.1-8/xarray/groupers.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/groupers.py	2025-10-10 10:38:05.000000000 +0000
@@ -7,9 +7,14 @@ to integer codes (one per group).
 from __future__ import annotations
 
 import datetime
+import functools
+import itertools
+import operator
 from abc import ABC, abstractmethod
+from collections import defaultdict
+from collections.abc import Hashable, Mapping, Sequence
 from dataclasses import dataclass, field
-from itertools import pairwise
+from itertools import chain, pairwise
 from typing import TYPE_CHECKING, Any, Literal, cast
 
 import numpy as np
@@ -17,10 +22,17 @@ import pandas as pd
 from numpy.typing import ArrayLike
 
 from xarray.coding.cftime_offsets import BaseCFTimeOffset, _new_to_legacy_freq
+from xarray.coding.cftimeindex import CFTimeIndex
+from xarray.compat.toolzcompat import sliding_window
 from xarray.computation.apply_ufunc import apply_ufunc
-from xarray.core.coordinates import Coordinates, _coordinates_from_variable
+from xarray.core.common import (
+    _contains_cftime_datetimes,
+    _contains_datetime_like_objects,
+)
+from xarray.core.coordinates import Coordinates, coordinates_from_variable
 from xarray.core.dataarray import DataArray
 from xarray.core.duck_array_ops import array_all, isnull
+from xarray.core.formatting import first_n_items
 from xarray.core.groupby import T_Group, _DummyGroup
 from xarray.core.indexes import safe_cast_to_index
 from xarray.core.resample_cftime import CFTimeGrouper
@@ -40,6 +52,8 @@ __all__ = [
     "EncodedGroups",
     "Grouper",
     "Resampler",
+    "SeasonGrouper",
+    "SeasonResampler",
     "TimeResampler",
     "UniqueGrouper",
 ]
@@ -69,9 +83,9 @@ class EncodedGroups:
 
     codes: DataArray
     full_index: pd.Index
-    group_indices: GroupIndices
-    unique_coord: Variable | _DummyGroup
-    coords: Coordinates
+    group_indices: GroupIndices = field(init=False, repr=False)
+    unique_coord: Variable | _DummyGroup = field(init=False, repr=False)
+    coords: Coordinates = field(init=False, repr=False)
 
     def __init__(
         self,
@@ -106,7 +120,10 @@ class EncodedGroups:
             self.group_indices = group_indices
 
         if unique_coord is None:
-            unique_values = full_index[np.unique(codes)]
+            unique_codes = np.sort(pd.unique(codes.data))
+            # Skip the -1 sentinel
+            unique_codes = unique_codes[unique_codes >= 0]
+            unique_values = full_index[unique_codes]
             self.unique_coord = Variable(
                 dims=codes.name, data=unique_values, attrs=codes.attrs
             )
@@ -115,7 +132,7 @@ class EncodedGroups:
 
         if coords is None:
             assert not isinstance(self.unique_coord, _DummyGroup)
-            self.coords = _coordinates_from_variable(self.unique_coord)
+            self.coords = coordinates_from_variable(self.unique_coord)
         else:
             self.coords = coords
 
@@ -154,7 +171,26 @@ class Resampler(Grouper):
     Currently only used for TimeResampler, but could be used for SpaceResampler in the future.
     """
 
-    pass
+    def compute_chunks(self, variable: Variable, *, dim: Hashable) -> tuple[int, ...]:
+        """
+        Compute chunk sizes for this resampler.
+
+        This method should be implemented by subclasses to provide appropriate
+        chunking behavior for their specific resampling strategy.
+
+        Parameters
+        ----------
+        variable : Variable
+            The variable being chunked.
+        dim : Hashable
+            The name of the dimension being chunked.
+
+        Returns
+        -------
+        tuple[int, ...]
+            A tuple of chunk sizes for the dimension.
+        """
+        raise NotImplementedError("Subclasses must implement compute_chunks method")
 
 
 @dataclass
@@ -171,7 +207,7 @@ class UniqueGrouper(Grouper):
         present in ``labels`` will be ignored.
     """
 
-    _group_as_index: pd.Index | None = field(default=None, repr=False)
+    _group_as_index: pd.Index | None = field(default=None, repr=False, init=False)
     labels: ArrayLike | None = field(default=None)
 
     @property
@@ -252,7 +288,7 @@ class UniqueGrouper(Grouper):
             codes=codes,
             full_index=full_index,
             unique_coord=unique_coord,
-            coords=_coordinates_from_variable(unique_coord),
+            coords=coordinates_from_variable(unique_coord),
         )
 
     def _factorize_dummy(self) -> EncodedGroups:
@@ -280,7 +316,7 @@ class UniqueGrouper(Grouper):
             else:
                 if TYPE_CHECKING:
                     assert isinstance(unique_coord, Variable)
-                coords = _coordinates_from_variable(unique_coord)
+                coords = coordinates_from_variable(unique_coord)
 
         return EncodedGroups(
             codes=codes,
@@ -319,7 +355,7 @@ class BinGrouper(Grouper):
         the resulting bins. If False, returns only integer indicators of the
         bins. This affects the type of the output container (see below).
         This argument is ignored when `bins` is an IntervalIndex. If True,
-        raises an error. When `ordered=False`, labels must be provided.
+        raises an error.
     retbins : bool, default False
         Whether to return the bins or not. Useful when bins is provided
         as a scalar.
@@ -365,15 +401,12 @@ class BinGrouper(Grouper):
             retbins=True,
         )
 
-    def _factorize_lazy(self, group: T_Group) -> DataArray:
-        def _wrapper(data, **kwargs):
-            binned, bins = self._cut(data)
-            if isinstance(self.bins, int):
-                # we are running eagerly, update self.bins with actual edges instead
-                self.bins = bins
-            return binned.codes.reshape(data.shape)
-
-        return apply_ufunc(_wrapper, group, dask="parallelized", keep_attrs=True)
+    def _pandas_cut_wrapper(self, data, **kwargs):
+        binned, bins = self._cut(data)
+        if isinstance(self.bins, int):
+            # we are running eagerly, update self.bins with actual edges instead
+            self.bins = bins
+        return binned.codes.reshape(data.shape)
 
     def factorize(self, group: T_Group) -> EncodedGroups:
         if isinstance(group, _DummyGroup):
@@ -383,7 +416,13 @@ class BinGrouper(Grouper):
             raise ValueError(
                 f"Bin edges must be provided when grouping by chunked arrays. Received {self.bins=!r} instead"
             )
-        codes = self._factorize_lazy(group)
+        codes = apply_ufunc(
+            self._pandas_cut_wrapper,
+            group,
+            dask="parallelized",
+            keep_attrs=True,
+            output_dtypes=[np.int64],
+        )
         if not by_is_chunked and array_all(codes == -1):
             raise ValueError(
                 f"None of the data falls within bins with edges {self.bins!r}"
@@ -394,8 +433,13 @@ class BinGrouper(Grouper):
 
         # This seems silly, but it lets us have Pandas handle the complexity
         # of `labels`, `precision`, and `include_lowest`, even when group is a chunked array
-        dummy, _ = self._cut(np.array([0]).astype(group.dtype))
-        full_index = dummy.categories
+        # Pandas ignores labels when IntervalIndex is passed
+        if self.labels is None or not isinstance(self.bins, pd.IntervalIndex):
+            dummy, _ = self._cut(np.array([0]).astype(group.dtype))
+            full_index = dummy.categories
+        else:
+            full_index = pd.Index(self.labels)
+
         if not by_is_chunked:
             uniques = np.sort(pd.unique(codes.data.ravel()))
             unique_values = full_index[uniques[uniques != -1]]
@@ -409,7 +453,7 @@ class BinGrouper(Grouper):
             codes=codes,
             full_index=full_index,
             unique_coord=unique_coord,
-            coords=_coordinates_from_variable(unique_coord),
+            coords=coordinates_from_variable(unique_coord),
         )
 
 
@@ -461,8 +505,6 @@ class TimeResampler(Resampler):
         )
 
     def _init_properties(self, group: T_Group) -> None:
-        from xarray import CFTimeIndex
-
         group_as_index = safe_cast_to_index(group)
         offset = self.offset
 
@@ -471,8 +513,6 @@ class TimeResampler(Resampler):
             raise ValueError("Index must be monotonic for resampling")
 
         if isinstance(group_as_index, CFTimeIndex):
-            from xarray.core.resample_cftime import CFTimeGrouper
-
             self.index_grouper = CFTimeGrouper(
                 freq=self.freq,
                 closed=self.closed,
@@ -507,9 +547,6 @@ class TimeResampler(Resampler):
         return full_index, first_items, codes
 
     def first_items(self) -> tuple[pd.Series, np.ndarray]:
-        from xarray.coding.cftimeindex import CFTimeIndex
-        from xarray.core.resample_cftime import CFTimeGrouper
-
         if isinstance(self.index_grouper, CFTimeGrouper):
             return self.index_grouper.first_items(
                 cast(CFTimeIndex, self.group_as_index)
@@ -530,7 +567,7 @@ class TimeResampler(Resampler):
         full_index, first_items, codes_ = self._get_index_and_items()
         sbins = first_items.values.astype(np.int64)
         group_indices: GroupIndices = tuple(
-            [slice(i, j) for i, j in pairwise(sbins)] + [slice(sbins[-1], None)]
+            list(itertools.starmap(slice, pairwise(sbins))) + [slice(sbins[-1], None)]
         )
 
         unique_coord = Variable(
@@ -543,8 +580,49 @@ class TimeResampler(Resampler):
             group_indices=group_indices,
             full_index=full_index,
             unique_coord=unique_coord,
-            coords=_coordinates_from_variable(unique_coord),
+            coords=coordinates_from_variable(unique_coord),
+        )
+
+    def compute_chunks(self, variable: Variable, *, dim: Hashable) -> tuple[int, ...]:
+        """
+        Compute chunk sizes for this time resampler.
+
+        This method is used during chunking operations to determine appropriate
+        chunk sizes for the given variable when using this resampler.
+
+        Parameters
+        ----------
+        variable : Variable
+            The variable being chunked.
+        dim : Hashable
+            The name of the dimension being chunked.
+
+        Returns
+        -------
+        tuple[int, ...]
+            A tuple of chunk sizes for the dimension.
+        """
+        if not _contains_datetime_like_objects(variable):
+            raise ValueError(
+                f"Computing chunks with {type(self)!r} only supported for datetime variables. "
+                f"Received variable with dtype {variable.dtype!r} instead."
+            )
+
+        chunks = (
+            DataArray(
+                np.ones(variable.shape, dtype=int),
+                dims=(dim,),
+                coords={dim: variable},
+            )
+            .resample({dim: self})
+            .sum()
         )
+        # When bins (binning) or time periods are missing (resampling)
+        # we can end up with NaNs. Drop them.
+        if chunks.dtype.kind == "f":
+            chunks = chunks.dropna(dim).astype(int)
+        chunks_tuple: tuple[int, ...] = tuple(chunks.data.tolist())
+        return chunks_tuple
 
 
 def _factorize_given_labels(data: np.ndarray, labels: np.ndarray) -> np.ndarray:
@@ -586,3 +664,418 @@ def unique_value_groups(
     if isinstance(values, pd.MultiIndex):
         values.names = ar.names
     return values, inverse
+
+
+def season_to_month_tuple(seasons: Sequence[str]) -> tuple[tuple[int, ...], ...]:
+    """
+    >>> season_to_month_tuple(["DJF", "MAM", "JJA", "SON"])
+    ((12, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, 11))
+    >>> season_to_month_tuple(["DJFM", "MAMJ", "JJAS", "SOND"])
+    ((12, 1, 2, 3), (3, 4, 5, 6), (6, 7, 8, 9), (9, 10, 11, 12))
+    >>> season_to_month_tuple(["DJFM", "SOND"])
+    ((12, 1, 2, 3), (9, 10, 11, 12))
+    """
+    initials = "JFMAMJJASOND"
+    starts = {
+        "".join(s): i + 1
+        for s, i in zip(sliding_window(2, initials + "J"), range(12), strict=True)
+    }
+    result: list[tuple[int, ...]] = []
+    for i, season in enumerate(seasons):
+        if len(season) == 1:
+            if i < len(seasons) - 1:
+                suffix = seasons[i + 1][0]
+            else:
+                suffix = seasons[0][0]
+        else:
+            suffix = season[1]
+
+        start = starts[season[0] + suffix]
+
+        month_append = []
+        for i in range(len(season[1:])):
+            elem = start + i + 1
+            month_append.append(elem - 12 * (elem > 12))
+        result.append((start,) + tuple(month_append))
+    return tuple(result)
+
+
+def inds_to_season_string(asints: tuple[tuple[int, ...], ...]) -> tuple[str, ...]:
+    inits = "JFMAMJJASOND"
+    return tuple("".join([inits[i_ - 1] for i_ in t]) for t in asints)
+
+
+def is_sorted_periodic(lst):
+    """Used to verify that seasons provided to SeasonResampler are in order."""
+    n = len(lst)
+
+    # Find the wraparound point where the list decreases
+    wrap_point = -1
+    for i in range(1, n):
+        if lst[i] < lst[i - 1]:
+            wrap_point = i
+            break
+
+    # If no wraparound point is found, the list is already sorted
+    if wrap_point == -1:
+        return True
+
+    # Check if both parts around the wrap point are sorted
+    for i in range(1, wrap_point):
+        if lst[i] < lst[i - 1]:
+            return False
+    for i in range(wrap_point + 1, n):
+        if lst[i] < lst[i - 1]:
+            return False
+
+    # Check wraparound condition
+    return lst[-1] <= lst[0]
+
+
+@dataclass(kw_only=True, frozen=True)
+class SeasonsGroup:
+    seasons: tuple[str, ...]
+    # tuple[integer months] corresponding to each season
+    inds: tuple[tuple[int, ...], ...]
+    # integer code for each season, this is not simply range(len(seasons))
+    # when the seasons have overlaps
+    codes: Sequence[int]
+
+
+def find_independent_seasons(seasons: Sequence[str]) -> Sequence[SeasonsGroup]:
+    """
+    Iterates through a list of seasons, e.g. ["DJF", "FMA", ...],
+    and splits that into multiple sequences of non-overlapping seasons.
+
+    >>> find_independent_seasons(
+    ...     ["DJF", "FMA", "AMJ", "JJA", "ASO", "OND"]
+    ... )  # doctest: +NORMALIZE_WHITESPACE
+    [SeasonsGroup(seasons=('DJF', 'AMJ', 'ASO'), inds=((12, 1, 2), (4, 5, 6), (8, 9, 10)), codes=[0, 2, 4]), SeasonsGroup(seasons=('FMA', 'JJA', 'OND'), inds=((2, 3, 4), (6, 7, 8), (10, 11, 12)), codes=[1, 3, 5])]
+
+    >>> find_independent_seasons(["DJF", "MAM", "JJA", "SON"])
+    [SeasonsGroup(seasons=('DJF', 'MAM', 'JJA', 'SON'), inds=((12, 1, 2), (3, 4, 5), (6, 7, 8), (9, 10, 11)), codes=[0, 1, 2, 3])]
+    """
+    season_inds = season_to_month_tuple(seasons)
+    grouped = defaultdict(list)
+    codes = defaultdict(list)
+    seen: set[tuple[int, ...]] = set()
+    # This is quadratic, but the number of seasons is at most 12
+    for i, current in enumerate(season_inds):
+        # Start with a group
+        if current not in seen:
+            grouped[i].append(current)
+            codes[i].append(i)
+            seen.add(current)
+
+        # Loop through remaining groups, and look for overlaps
+        for j, second in enumerate(season_inds[i:]):
+            if not (set(chain(*grouped[i])) & set(second)) and second not in seen:
+                grouped[i].append(second)
+                codes[i].append(j + i)
+                seen.add(second)
+        if len(seen) == len(seasons):
+            break
+        # found all non-overlapping groups for this row; start over
+
+    grouped_ints = tuple(tuple(idx) for idx in grouped.values() if idx)
+    return [
+        SeasonsGroup(seasons=inds_to_season_string(inds), inds=inds, codes=codes)
+        for inds, codes in zip(grouped_ints, codes.values(), strict=False)
+    ]
+
+
+@dataclass
+class SeasonGrouper(Grouper):
+    """Allows grouping using a custom definition of seasons.
+
+    Parameters
+    ----------
+    seasons: sequence of str
+        List of strings representing seasons, e.g. ``"JF"`` or ``"JJA"``.
+        Overlapping seasons are allowed (e.g. ``["DJFM", "MAMJ", "JJAS", "SOND"]``)
+
+    Examples
+    --------
+    >>> SeasonGrouper(["JF", "MAM", "JJAS", "OND"])
+    SeasonGrouper(seasons=['JF', 'MAM', 'JJAS', 'OND'])
+
+    The ordering is preserved
+
+    >>> SeasonGrouper(["MAM", "JJAS", "OND", "JF"])
+    SeasonGrouper(seasons=['MAM', 'JJAS', 'OND', 'JF'])
+
+    Overlapping seasons are allowed
+
+    >>> SeasonGrouper(["DJFM", "MAMJ", "JJAS", "SOND"])
+    SeasonGrouper(seasons=['DJFM', 'MAMJ', 'JJAS', 'SOND'])
+    """
+
+    seasons: Sequence[str]
+    # drop_incomplete: bool = field(default=True) # TODO
+
+    def factorize(self, group: T_Group) -> EncodedGroups:
+        if TYPE_CHECKING:
+            assert not isinstance(group, _DummyGroup)
+        if not _contains_datetime_like_objects(group.variable):
+            raise ValueError(
+                "SeasonGrouper can only be used to group by datetime-like arrays."
+            )
+        months = group.dt.month.data
+        seasons_groups = find_independent_seasons(self.seasons)
+        codes_ = np.full((len(seasons_groups),) + group.shape, -1, dtype=np.int8)
+        group_indices: list[list[int]] = [[]] * len(self.seasons)
+        for axis_index, seasgroup in enumerate(seasons_groups):
+            for season_tuple, code in zip(
+                seasgroup.inds, seasgroup.codes, strict=False
+            ):
+                mask = np.isin(months, season_tuple)
+                codes_[axis_index, mask] = code
+                (indices,) = mask.nonzero()
+                group_indices[code] = indices.tolist()
+
+        if np.all(codes_ == -1):
+            raise ValueError(
+                "Failed to group data. Are you grouping by a variable that is all NaN?"
+            )
+        needs_dummy_dim = len(seasons_groups) > 1
+        codes = DataArray(
+            dims=(("__season_dim__",) if needs_dummy_dim else tuple()) + group.dims,
+            data=codes_ if needs_dummy_dim else codes_.squeeze(),
+            attrs=group.attrs,
+            name="season",
+        )
+        unique_coord = Variable("season", self.seasons, attrs=group.attrs)
+        full_index = pd.Index(self.seasons)
+        return EncodedGroups(
+            codes=codes,
+            group_indices=tuple(group_indices),
+            unique_coord=unique_coord,
+            full_index=full_index,
+        )
+
+    def reset(self) -> Self:
+        return type(self)(self.seasons)
+
+
+@dataclass
+class SeasonResampler(Resampler):
+    """Allows grouping using a custom definition of seasons.
+
+    Parameters
+    ----------
+    seasons: Sequence[str]
+        An ordered list of seasons.
+    drop_incomplete: bool
+        Whether to drop seasons that are not completely included in the data.
+        For example, if a time series starts in Jan-2001 and ``seasons`` includes `"DJF"`,
+        then observations from Jan-2001 and Feb-2001 are ignored in the grouping
+        since Dec-2000 isn't present.
+
+    Examples
+    --------
+    >>> SeasonResampler(["JF", "MAM", "JJAS", "OND"])
+    SeasonResampler(seasons=['JF', 'MAM', 'JJAS', 'OND'], drop_incomplete=True)
+
+    >>> SeasonResampler(["DJFM", "AM", "JJA", "SON"])
+    SeasonResampler(seasons=['DJFM', 'AM', 'JJA', 'SON'], drop_incomplete=True)
+    """
+
+    seasons: Sequence[str]
+    drop_incomplete: bool = field(default=True, kw_only=True)
+    season_inds: Sequence[Sequence[int]] = field(init=False, repr=False)
+    season_tuples: Mapping[str, Sequence[int]] = field(init=False, repr=False)
+
+    def __post_init__(self):
+        self.season_inds = season_to_month_tuple(self.seasons)
+        all_inds = functools.reduce(operator.add, self.season_inds)
+        if len(all_inds) > len(set(all_inds)):
+            raise ValueError(
+                f"Overlapping seasons are not allowed. Received {self.seasons!r}"
+            )
+        self.season_tuples = dict(zip(self.seasons, self.season_inds, strict=True))
+
+        if not is_sorted_periodic(list(itertools.chain(*self.season_inds))):
+            raise ValueError(
+                "Resampling is only supported with sorted seasons. "
+                f"Provided seasons {self.seasons!r} are not sorted."
+            )
+
+    def factorize(self, group: T_Group) -> EncodedGroups:
+        if group.ndim != 1:
+            raise ValueError(
+                "SeasonResampler can only be used to resample by 1D arrays."
+            )
+        if not isinstance(group, DataArray) or not _contains_datetime_like_objects(
+            group.variable
+        ):
+            raise ValueError(
+                "SeasonResampler can only be used to group by datetime-like DataArrays."
+            )
+
+        seasons = self.seasons
+        season_inds = self.season_inds
+        season_tuples = self.season_tuples
+
+        nstr = max(len(s) for s in seasons)
+        year = group.dt.year.astype(int)
+        month = group.dt.month.astype(int)
+        season_label = np.full(group.shape, "", dtype=f"U{nstr}")
+
+        # offset years for seasons with December and January
+        for season_str, season_ind in zip(seasons, season_inds, strict=True):
+            season_label[month.isin(season_ind)] = season_str
+            if "DJ" in season_str:
+                after_dec = season_ind[season_str.index("D") + 1 :]
+                # important: this is assuming non-overlapping seasons
+                year[month.isin(after_dec)] -= 1
+
+        # Allow users to skip one or more months?
+        # present_seasons is a mask that is True for months that are requested in the output
+        present_seasons = season_label != ""
+        if present_seasons.all():
+            # avoid copies if we can.
+            present_seasons = slice(None)
+        frame = pd.DataFrame(
+            data={
+                "index": np.arange(group[present_seasons].size),
+                "month": month[present_seasons],
+            },
+            index=pd.MultiIndex.from_arrays(
+                [year.data[present_seasons], season_label[present_seasons]],
+                names=["year", "season"],
+            ),
+        )
+
+        agged = (
+            frame["index"]
+            .groupby(["year", "season"], sort=False)
+            .agg(["first", "count"])
+        )
+        first_items = agged["first"]
+        counts = agged["count"]
+
+        index_class: type[CFTimeIndex | pd.DatetimeIndex]
+        if _contains_cftime_datetimes(group.data):
+            index_class = CFTimeIndex
+            datetime_class = type(first_n_items(group.data, 1).item())
+        else:
+            index_class = pd.DatetimeIndex
+            datetime_class = datetime.datetime
+
+        # these are the seasons that are present
+        unique_coord = index_class(
+            [
+                datetime_class(year=year, month=season_tuples[season][0], day=1)
+                for year, season in first_items.index
+            ]
+        )
+
+        # This sorted call is a hack. It's hard to figure out how
+        # to start the iteration for arbitrary season ordering
+        # for example "DJF" as first entry or last entry
+        # So we construct the largest possible index and slice it to the
+        # range present in the data.
+        complete_index = index_class(
+            sorted(
+                [
+                    datetime_class(year=y, month=m, day=1)
+                    for y, m in itertools.product(
+                        range(year[0].item(), year[-1].item() + 1),
+                        [s[0] for s in season_inds],
+                    )
+                ]
+            )
+        )
+
+        # all years and seasons
+        def get_label(year, season):
+            month, *_ = season_tuples[season]
+            return f"{year}-{month:02d}-01"
+
+        unique_codes = np.arange(len(unique_coord))
+        valid_season_mask = season_label != ""
+        first_valid_season, last_valid_season = season_label[valid_season_mask][[0, -1]]
+        first_year, last_year = year.data[[0, -1]]
+        if self.drop_incomplete:
+            if month.data[valid_season_mask][0] != season_tuples[first_valid_season][0]:
+                if "DJ" in first_valid_season:
+                    first_year += 1
+                first_valid_season = seasons[
+                    (seasons.index(first_valid_season) + 1) % len(seasons)
+                ]
+                unique_codes -= 1
+
+            if (
+                month.data[valid_season_mask][-1]
+                != season_tuples[last_valid_season][-1]
+            ):
+                last_valid_season = seasons[seasons.index(last_valid_season) - 1]
+                if "DJ" in last_valid_season:
+                    last_year -= 1
+                unique_codes[-1] = -1
+
+        first_label = get_label(first_year, first_valid_season)
+        last_label = get_label(last_year, last_valid_season)
+
+        slicer = complete_index.slice_indexer(first_label, last_label)
+        full_index = complete_index[slicer]
+
+        final_codes = np.full(group.data.size, -1)
+        final_codes[present_seasons] = np.repeat(unique_codes, counts)
+        codes = group.copy(data=final_codes, deep=False)
+
+        return EncodedGroups(codes=codes, full_index=full_index)
+
+    def compute_chunks(self, variable: Variable, *, dim: Hashable) -> tuple[int, ...]:
+        """
+        Compute chunk sizes for this season resampler.
+
+        This method is used during chunking operations to determine appropriate
+        chunk sizes for the given variable when using this resampler.
+
+        Parameters
+        ----------
+        variable : Variable
+            The variable being chunked.
+        dim : Hashable
+            The name of the dimension being chunked.
+
+        Returns
+        -------
+        tuple[int, ...]
+            A tuple of chunk sizes for the dimension.
+        """
+        if not _contains_datetime_like_objects(variable):
+            raise ValueError(
+                f"Computing chunks with {type(self)!r} only supported for datetime variables. "
+                f"Received variable with dtype {variable.dtype!r} instead."
+            )
+
+        if len("".join(self.seasons)) != 12:
+            raise ValueError(
+                "Cannot rechunk with a SeasonResampler that does not cover all 12 months. "
+                f"Received `seasons={self.seasons!r}`."
+            )
+
+        # Create a temporary resampler that ignores drop_incomplete for chunking
+        # This prevents data from being silently dropped during chunking
+        resampler_for_chunking = type(self)(seasons=self.seasons, drop_incomplete=False)
+
+        chunks = (
+            DataArray(
+                np.ones(variable.shape, dtype=int),
+                dims=(dim,),
+                coords={dim: variable},
+            )
+            .resample({dim: resampler_for_chunking})
+            .sum()
+        )
+        # When bins (binning) or time periods are missing (resampling)
+        # we can end up with NaNs. Drop them.
+        if chunks.dtype.kind == "f":
+            chunks = chunks.dropna(dim).astype(int)
+        chunks_tuple: tuple[int, ...] = tuple(chunks.data.tolist())
+        return chunks_tuple
+
+    def reset(self) -> Self:
+        return type(self)(seasons=self.seasons, drop_incomplete=self.drop_incomplete)
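A usage sketch for the new ``SeasonGrouper`` and ``SeasonResampler``, assuming they are passed to ``groupby``/``resample`` the same way existing ``Grouper``/``Resampler`` objects are (the ``compute_chunks`` implementations above already build a ``{dim: resampler}`` mapping internally); the data here is synthetic.

import numpy as np
import pandas as pd
import xarray as xr
from xarray.groupers import SeasonGrouper, SeasonResampler

time = pd.date_range("2001-01-01", "2003-12-31", freq="D")
da = xr.DataArray(np.random.rand(time.size), coords={"time": time}, dims="time")

# Group matching months from all years together; overlapping seasons are allowed.
climatology = da.groupby(time=SeasonGrouper(["DJF", "MAM", "JJA", "SON"])).mean()

# Resample into consecutive, non-overlapping season blocks; incomplete seasons
# at either end are dropped by default (drop_incomplete=True).
seasonal_means = da.resample(time=SeasonResampler(["DJF", "MAM", "JJA", "SON"])).mean()

As the ``__post_init__`` check above enforces, ``SeasonResampler`` requires sorted, non-overlapping seasons, whereas ``SeasonGrouper`` also accepts overlapping ones such as ``["DJFM", "MAMJ", "JJAS", "SOND"]``.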
diff -pruN 2025.03.1-8/xarray/indexes/__init__.py 2025.10.1-1/xarray/indexes/__init__.py
--- 2025.03.1-8/xarray/indexes/__init__.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/indexes/__init__.py	2025-10-10 10:38:05.000000000 +0000
@@ -3,10 +3,23 @@ DataArray objects.
 
 """
 
+from xarray.core.coordinate_transform import CoordinateTransform
 from xarray.core.indexes import (
+    CoordinateTransformIndex,
     Index,
     PandasIndex,
     PandasMultiIndex,
 )
+from xarray.indexes.nd_point_index import NDPointIndex, TreeAdapter
+from xarray.indexes.range_index import RangeIndex
 
-__all__ = ["Index", "PandasIndex", "PandasMultiIndex"]
+__all__ = [
+    "CoordinateTransform",
+    "CoordinateTransformIndex",
+    "Index",
+    "NDPointIndex",
+    "PandasIndex",
+    "PandasMultiIndex",
+    "RangeIndex",
+    "TreeAdapter",
+]
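As a quick check of the expanded public namespace, the sketch below only verifies that the newly exported names are importable; constructor details for ``RangeIndex`` and ``NDPointIndex`` live in the new modules added later in this patch.

import xarray as xr

for name in (
    "CoordinateTransform",
    "CoordinateTransformIndex",
    "Index",
    "NDPointIndex",
    "PandasIndex",
    "PandasMultiIndex",
    "RangeIndex",
    "TreeAdapter",
):
    assert hasattr(xr.indexes, name), name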
diff -pruN 2025.03.1-8/xarray/indexes/nd_point_index.py 2025.10.1-1/xarray/indexes/nd_point_index.py
--- 2025.03.1-8/xarray/indexes/nd_point_index.py	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/xarray/indexes/nd_point_index.py	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,398 @@
+from __future__ import annotations
+
+import abc
+from collections.abc import Hashable, Iterable, Mapping
+from typing import TYPE_CHECKING, Any, Generic, TypeVar
+
+import numpy as np
+
+from xarray.core.dataarray import DataArray
+from xarray.core.indexes import Index
+from xarray.core.indexing import IndexSelResult
+from xarray.core.utils import is_scalar
+from xarray.core.variable import Variable
+from xarray.structure.alignment import broadcast
+
+if TYPE_CHECKING:
+    from scipy.spatial import KDTree
+
+    from xarray.core.types import Self
+
+
+class TreeAdapter(abc.ABC):
+    """Lightweight adapter abstract class for plugging in 3rd-party structures
+    like :py:class:`scipy.spatial.KDTree` or :py:class:`sklearn.neighbors.KDTree`
+    into :py:class:`~xarray.indexes.NDPointIndex`.
+
+    """
+
+    @abc.abstractmethod
+    def __init__(self, points: np.ndarray, *, options: Mapping[str, Any]):
+        """
+        Parameters
+        ----------
+        points : ndarray of shape (n_points, n_coordinates)
+            Two-dimensional array of points/samples (rows) and their
+            corresponding coordinate labels (columns) to index.
+        """
+        ...
+
+    @abc.abstractmethod
+    def query(self, points: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
+        """Query points.
+
+        Parameters
+        ----------
+        points: ndarray of shape (n_points, n_coordinates)
+            Two-dimensional array of points/samples (rows) and their
+            corresponding coordinate labels (columns) to query.
+
+        Returns
+        -------
+        distances : ndarray of shape (n_points)
+            Distances to the nearest neighbors.
+        indices : ndarray of shape (n_points)
+            Indices of the nearest neighbors in the array of the indexed
+            points.
+        """
+        ...
+
+    def equals(self, other: Self) -> bool:
+        """Check equality with another TreeAdapter of the same kind.
+
+        Parameters
+        ----------
+        other :
+            The other TreeAdapter object to compare with this object.
+
+        """
+        raise NotImplementedError
+
+
+class ScipyKDTreeAdapter(TreeAdapter):
+    """:py:class:`scipy.spatial.KDTree` adapter for :py:class:`~xarray.indexes.NDPointIndex`."""
+
+    _kdtree: KDTree
+
+    def __init__(self, points: np.ndarray, options: Mapping[str, Any]):
+        from scipy.spatial import KDTree
+
+        self._kdtree = KDTree(points, **options)
+
+    def query(self, points: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
+        return self._kdtree.query(points)  # type: ignore[return-value,unused-ignore]
+
+    def equals(self, other: Self) -> bool:
+        return np.array_equal(self._kdtree.data, other._kdtree.data)
+
+
+def get_points(coords: Iterable[Variable | Any]) -> np.ndarray:
+    """Re-arrange data from a sequence of xarray coordinate variables or
+    labels into a 2-d array of shape (n_points, n_coordinates).
+
+    """
+    data = [c.values if isinstance(c, Variable | DataArray) else c for c in coords]
+    return np.stack([np.ravel(d) for d in data]).T
+
+
+T_TreeAdapter = TypeVar("T_TreeAdapter", bound=TreeAdapter)
+
+
+class NDPointIndex(Index, Generic[T_TreeAdapter]):
+    """Xarray index for irregular, n-dimensional data.
+
+    This index may be associated with a set of coordinate variables representing
+    the arbitrary location of data points in an n-dimensional space. All
+    coordinates must have the same shape and dimensions. The number of
+    associated coordinate variables must correspond to the number of dimensions
+    of the space.
+
+    This index supports label-based selection (nearest neighbor lookup). It also
+    has limited support for alignment.
+
+    By default, this index relies on :py:class:`scipy.spatial.KDTree` for fast
+    lookup.
+
+    Do not use :py:meth:`~xarray.indexes.NDPointIndex.__init__` directly. Instead
+    use :py:meth:`xarray.Dataset.set_xindex` or
+    :py:meth:`xarray.DataArray.set_xindex` to create and set the index from
+    existing coordinates (see the example below).
+
+    Examples
+    --------
+    An example using a dataset with 2-dimensional coordinates.
+
+    >>> xx = [[1.0, 2.0], [3.0, 0.0]]
+    >>> yy = [[11.0, 21.0], [29.0, 9.0]]
+    >>> ds = xr.Dataset(coords={"xx": (("y", "x"), xx), "yy": (("y", "x"), yy)})
+    >>> ds
+    <xarray.Dataset> Size: 64B
+    Dimensions:  (y: 2, x: 2)
+    Coordinates:
+        xx       (y, x) float64 32B 1.0 2.0 3.0 0.0
+        yy       (y, x) float64 32B 11.0 21.0 29.0 9.0
+    Dimensions without coordinates: y, x
+    Data variables:
+        *empty*
+
+    Creation of a NDPointIndex from the "xx" and "yy" coordinate variables:
+
+    >>> ds = ds.set_xindex(("xx", "yy"), xr.indexes.NDPointIndex)
+    >>> ds
+    <xarray.Dataset> Size: 64B
+    Dimensions:  (y: 2, x: 2)
+    Coordinates:
+      * xx       (y, x) float64 32B 1.0 2.0 3.0 0.0
+      * yy       (y, x) float64 32B 11.0 21.0 29.0 9.0
+    Dimensions without coordinates: y, x
+    Data variables:
+        *empty*
+    Indexes:
+      ┌ xx       NDPointIndex (ScipyKDTreeAdapter)
+      └ yy
+
+    Point-wise (nearest-neighbor) data selection using Xarray's advanced
+    indexing, i.e., using arbitrary dimension(s) for the Variable objects passed
+    as labels:
+
+    >>> ds.sel(
+    ...     xx=xr.Variable("points", [1.9, 0.1]),
+    ...     yy=xr.Variable("points", [13.0, 8.0]),
+    ...     method="nearest",
+    ... )
+    <xarray.Dataset> Size: 32B
+    Dimensions:  (points: 2)
+    Coordinates:
+        xx       (points) float64 16B 1.0 0.0
+        yy       (points) float64 16B 11.0 9.0
+    Dimensions without coordinates: points
+    Data variables:
+        *empty*
+
+    Data selection with scalar labels:
+
+    >>> ds.sel(xx=1.9, yy=13.0, method="nearest")
+    <xarray.Dataset> Size: 16B
+    Dimensions:  ()
+    Coordinates:
+        xx       float64 8B 1.0
+        yy       float64 8B 11.0
+    Data variables:
+        *empty*
+
+    Data selection with broadcasting the input labels:
+
+    >>> ds.sel(xx=1.9, yy=xr.Variable("points", [13.0, 8.0]), method="nearest")
+    <xarray.Dataset> Size: 32B
+    Dimensions:  (points: 2)
+    Coordinates:
+        xx       (points) float64 16B 1.0 0.0
+        yy       (points) float64 16B 11.0 9.0
+    Dimensions without coordinates: points
+    Data variables:
+        *empty*
+
+    >>> da = xr.DataArray(
+    ...     [[45.1, 53.3], [65.4, 78.2]],
+    ...     coords={"u": [1.9, 0.1], "v": [13.0, 8.0]},
+    ...     dims=("u", "v"),
+    ... )
+    >>> ds.sel(xx=da.u, yy=da.v, method="nearest")
+    <xarray.Dataset> Size: 64B
+    Dimensions:  (u: 2, v: 2)
+    Coordinates:
+        xx       (u, v) float64 32B 1.0 0.0 1.0 0.0
+        yy       (u, v) float64 32B 11.0 9.0 11.0 9.0
+    Dimensions without coordinates: u, v
+    Data variables:
+        *empty*
+
+    Data selection with array-like labels (implicit dimensions):
+
+    >>> ds.sel(xx=[[1.9], [0.1]], yy=[[13.0], [8.0]], method="nearest")
+    <xarray.Dataset> Size: 32B
+    Dimensions:  (y: 2, x: 1)
+    Coordinates:
+        xx       (y, x) float64 16B 1.0 0.0
+        yy       (y, x) float64 16B 11.0 9.0
+    Dimensions without coordinates: y, x
+    Data variables:
+        *empty*
+
+    """
+
+    _tree_obj: T_TreeAdapter
+    _coord_names: tuple[Hashable, ...]
+    _dims: tuple[Hashable, ...]
+    _shape: tuple[int, ...]
+
+    def __init__(
+        self,
+        tree_obj: T_TreeAdapter,
+        *,
+        coord_names: tuple[Hashable, ...],
+        dims: tuple[Hashable, ...],
+        shape: tuple[int, ...],
+    ):
+        # this constructor is "private"
+        assert isinstance(tree_obj, TreeAdapter)
+        self._tree_obj = tree_obj
+
+        assert len(coord_names) == len(dims) == len(shape)
+        self._coord_names = coord_names
+        self._dims = dims
+        self._shape = shape
+
+    @classmethod
+    def from_variables(
+        cls,
+        variables: Mapping[Any, Variable],
+        *,
+        options: Mapping[str, Any],
+    ) -> Self:
+        if len({var.dims for var in variables.values()}) > 1:
+            var_names = ",".join(vn for vn in variables)
+            raise ValueError(
+                f"variables {var_names} must all have the same dimensions and the same shape"
+            )
+
+        var0 = next(iter(variables.values()))
+
+        if len(variables) != len(var0.dims):
+            raise ValueError(
+                f"the number of variables {len(variables)} doesn't match "
+                f"the number of dimensions {len(var0.dims)}"
+            )
+
+        opts = dict(options)
+
+        tree_adapter_cls: type[T_TreeAdapter] = opts.pop("tree_adapter_cls", None)
+        if tree_adapter_cls is None:
+            tree_adapter_cls = ScipyKDTreeAdapter
+
+        points = get_points(variables.values())
+
+        return cls(
+            tree_adapter_cls(points, options=opts),
+            coord_names=tuple(variables),
+            dims=var0.dims,
+            shape=var0.shape,
+        )
+
+    def create_variables(
+        self, variables: Mapping[Any, Variable] | None = None
+    ) -> dict[Any, Variable]:
+        if variables is not None:
+            for var in variables.values():
+                # maybe re-sync variable dimensions with the index object
+                # returned by NDPointIndex.rename()
+                if var.dims != self._dims:
+                    var.dims = self._dims
+            return dict(**variables)
+        else:
+            return {}
+
+    def equals(
+        self, other: Index, *, exclude: frozenset[Hashable] | None = None
+    ) -> bool:
+        if not isinstance(other, NDPointIndex):
+            return False
+        if type(self._tree_obj) is not type(other._tree_obj):
+            return False
+        return self._tree_obj.equals(other._tree_obj)
+
+    def _get_dim_indexers(
+        self,
+        indices: np.ndarray,
+        label_dims: tuple[Hashable, ...],
+        label_shape: tuple[int, ...],
+    ) -> dict[Hashable, Variable]:
+        """Returns dimension indexers based on the query results (indices) and
+        the original label dimensions and shape.
+
+        1. Unravel the flat indices returned from the query
+        2. Reshape the unraveled indices according to indexers shapes
+        3. Wrap the indices in xarray.Variable objects.
+
+        """
+        dim_indexers = {}
+
+        u_indices = list(np.unravel_index(indices.ravel(), self._shape))
+
+        for dim, ind in zip(self._dims, u_indices, strict=False):
+            dim_indexers[dim] = Variable(label_dims, ind.reshape(label_shape))
+
+        return dim_indexers
+
+    def sel(
+        self, labels: dict[Any, Any], method=None, tolerance=None
+    ) -> IndexSelResult:
+        if method != "nearest":
+            raise ValueError(
+                "NDPointIndex only supports selection with method='nearest'"
+            )
+
+        missing_labels = set(self._coord_names) - set(labels)
+        if missing_labels:
+            missing_labels_str = ",".join([f"{name}" for name in missing_labels])
+            raise ValueError(f"missing labels for coordinate(s): {missing_labels_str}.")
+
+        # maybe convert labels into xarray DataArray objects
+        xr_labels: dict[Any, DataArray] = {}
+
+        for name, lbl in labels.items():
+            if isinstance(lbl, DataArray):
+                xr_labels[name] = lbl
+            elif isinstance(lbl, Variable):
+                xr_labels[name] = DataArray(lbl)
+            elif is_scalar(lbl):
+                xr_labels[name] = DataArray(lbl, dims=())
+            elif np.asarray(lbl).ndim == len(self._dims):
+                xr_labels[name] = DataArray(lbl, dims=self._dims)
+            else:
+                raise ValueError(
+                    "invalid label value. NDPointIndex only supports advanced (point-wise) indexing "
+                    "with the following label value kinds:\n"
+                    "- xarray.DataArray or xarray.Variable objects\n"
+                    "- scalar values\n"
+                    "- unlabelled array-like objects with the same number of dimensions "
+                    f"than the {self._coord_names} coordinate variables ({len(self._dims)})"
+                )
+
+        # broadcast xarray labels against one another and determine labels shape and dimensions
+        broadcasted = broadcast(*xr_labels.values())
+        label_dims = broadcasted[0].dims
+        label_shape = broadcasted[0].shape
+        xr_labels = dict(zip(xr_labels, broadcasted, strict=True))
+
+        # get and return dimension indexers
+        points = get_points(xr_labels[name] for name in self._coord_names)
+        _, indices = self._tree_obj.query(points)
+
+        dim_indexers = self._get_dim_indexers(indices, label_dims, label_shape)
+
+        return IndexSelResult(dim_indexers=dim_indexers)
+
+    def rename(
+        self,
+        name_dict: Mapping[Any, Hashable],
+        dims_dict: Mapping[Any, Hashable],
+    ) -> Self:
+        if not set(self._coord_names) & set(name_dict) and not set(self._dims) & set(
+            dims_dict
+        ):
+            return self
+
+        new_coord_names = tuple(name_dict.get(n, n) for n in self._coord_names)
+        new_dims = tuple(dims_dict.get(d, d) for d in self._dims)
+
+        return type(self)(
+            self._tree_obj,
+            coord_names=new_coord_names,
+            dims=new_dims,
+            shape=self._shape,
+        )
+
+    def _repr_inline_(self, max_width: int) -> str:
+        tree_obj_type = self._tree_obj.__class__.__name__
+        return f"{self.__class__.__name__} ({tree_obj_type})"
diff -pruN 2025.03.1-8/xarray/indexes/range_index.py 2025.10.1-1/xarray/indexes/range_index.py
--- 2025.03.1-8/xarray/indexes/range_index.py	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/xarray/indexes/range_index.py	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,416 @@
+import math
+from collections.abc import Hashable, Mapping
+from typing import Any
+
+import numpy as np
+import pandas as pd
+
+from xarray.core import duck_array_ops
+from xarray.core.coordinate_transform import CoordinateTransform
+from xarray.core.dataarray import DataArray
+from xarray.core.indexes import CoordinateTransformIndex, Index, PandasIndex
+from xarray.core.indexing import IndexSelResult
+from xarray.core.variable import Variable
+
+
+class RangeCoordinateTransform(CoordinateTransform):
+    """1-dimensional coordinate transform representing a simple bounded interval
+    with evenly spaced, floating-point values.
+    """
+
+    start: float
+    stop: float
+    _step: float | None
+
+    __slots__ = ("_step", "start", "stop")
+
+    def __init__(
+        self,
+        start: float,
+        stop: float,
+        size: int,
+        coord_name: Hashable,
+        dim: str,
+        dtype: Any = None,
+    ):
+        if dtype is None:
+            dtype = np.dtype(np.float64)
+
+        super().__init__([coord_name], {dim: size}, dtype=dtype)
+
+        self.start = start
+        self.stop = stop
+        self._step = None  # Will be calculated by property
+
+    @property
+    def coord_name(self) -> Hashable:
+        return self.coord_names[0]
+
+    @property
+    def dim(self) -> str:
+        return self.dims[0]
+
+    @property
+    def size(self) -> int:
+        return self.dim_size[self.dim]
+
+    @property
+    def step(self) -> float:
+        if self._step is not None:
+            return self._step
+        if self.size > 0:
+            return (self.stop - self.start) / self.size
+        else:
+            # For empty arrays, default to 1.0
+            return 1.0
+
+    def forward(self, dim_positions: dict[str, Any]) -> dict[Hashable, Any]:
+        positions = dim_positions[self.dim]
+        labels = self.start + positions * self.step
+        return {self.coord_name: labels}
+
+    def reverse(self, coord_labels: dict[Hashable, Any]) -> dict[str, Any]:
+        labels = coord_labels[self.coord_name]
+        positions = (labels - self.start) / self.step
+        return {self.dim: positions}
+
+    def equals(
+        self, other: CoordinateTransform, exclude: frozenset[Hashable] | None = None
+    ) -> bool:
+        if not isinstance(other, RangeCoordinateTransform):
+            return False
+
+        return (
+            self.start == other.start
+            and self.stop == other.stop
+            and self.size == other.size
+        )
+
+    def slice(self, sl: slice) -> "RangeCoordinateTransform":
+        new_range = range(self.size)[sl]
+        new_size = len(new_range)
+
+        new_start = self.start + new_range.start * self.step
+        new_stop = self.start + new_range.stop * self.step
+
+        result = type(self)(
+            new_start,
+            new_stop,
+            new_size,
+            self.coord_name,
+            self.dim,
+            dtype=self.dtype,
+        )
+        if new_size == 0:
+            # For empty slices, preserve step from parent
+            result._step = self.step
+        return result
+
+
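
The transform above is a simple affine map: forward() computes labels as
start + positions * step and reverse() inverts it. A small round-trip sketch,
assuming the module path xarray/indexes/range_index.py introduced in this diff
(illustrative only):

    import numpy as np
    from xarray.indexes.range_index import RangeCoordinateTransform

    t = RangeCoordinateTransform(start=0.0, stop=1.0, size=4, coord_name="x", dim="x")
    assert t.step == 0.25                          # (stop - start) / size

    pos = np.arange(t.size)
    labels = t.forward({"x": pos})["x"]            # [0.0, 0.25, 0.5, 0.75]
    roundtrip = t.reverse({"x": labels})["x"]      # [0.0, 1.0, 2.0, 3.0]
    assert np.allclose(roundtrip, pos)
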
+class RangeIndex(CoordinateTransformIndex):
+    """Xarray index implementing a simple bounded 1-dimension interval with
+    evenly spaced, monotonic floating-point values.
+
+    This index is memory-saving, i.e., the values of its associated coordinate
+    variable are not materialized in memory.
+
+    Do not use :py:meth:`~xarray.indexes.RangeIndex.__init__` directly. Instead
+    use :py:meth:`~xarray.indexes.RangeIndex.arange` or
+    :py:meth:`~xarray.indexes.RangeIndex.linspace`, which are similar to
+    :py:func:`numpy.arange` and :py:func:`numpy.linspace`.
+
+    In the case of a monotonic integer range, it is better to use a
+    :py:class:`~xarray.indexes.PandasIndex` that wraps a
+    :py:class:`pandas.RangeIndex`.
+
+    """
+
+    transform: RangeCoordinateTransform
+
+    def __init__(self, transform: RangeCoordinateTransform):
+        super().__init__(transform)
+
+    @classmethod
+    def arange(
+        cls,
+        start: float | None = None,
+        stop: float | None = None,
+        step: float | None = None,
+        *,
+        coord_name: Hashable | None = None,
+        dim: str,
+        dtype: Any = None,
+    ) -> "RangeIndex":
+        """Create a new RangeIndex from given start, stop and step values.
+
+        ``RangeIndex.arange`` can be called with a varying number of positional arguments:
+
+        - ``RangeIndex.arange(stop)``: the index is within the half-open interval [0, stop)
+          (in other words, the interval including start but excluding stop).
+
+        - ``RangeIndex.arange(start, stop)``: the index is within the half-open interval
+          [start, stop).
+
+        - ``RangeIndex.arange(start, stop, step)``: the index is within the half-open interval
+          [start, stop), with spacing between values given by step.
+
+        .. note::
+           When using a non-integer step, such as 0.1, it is often better to use
+           :py:meth:`~xarray.indexes.RangeIndex.linspace`.
+
+        .. note::
+           ``RangeIndex.arange(start=4.0)`` returns a range index in the [0.0, 4.0)
+           interval, i.e., ``start`` is interpreted as ``stop`` even when it is given
+           as the only keyword argument.
+
+        Parameters
+        ----------
+        start : float, optional
+            Start of interval. The interval includes this value. The default start
+            value is 0. If ``stop`` is not given, the value given here is interpreted
+            as the end of the interval.
+        stop : float
+            End of interval. In general the interval does not include this value,
+            except when floating point round-off affects the size of the dimension.
+        step : float, optional
+            Spacing between values (default: 1.0).
+        coord_name : Hashable, optional
+            Name of the (lazy) coordinate variable that will be created and
+            associated with the new index. If ``None``, the coordinate is named
+            as the dimension name.
+        dim : str
+            Dimension name.
+        dtype : dtype, optional
+            The dtype of the coordinate variable (default: float64).
+
+        Examples
+        --------
+        >>> from xarray.indexes import RangeIndex
+
+        >>> index = RangeIndex.arange(0.0, 1.0, 0.2, dim="x")
+        >>> ds = xr.Dataset(coords=xr.Coordinates.from_xindex(index))
+
+        >>> ds
+        <xarray.Dataset> Size: 40B
+        Dimensions:  (x: 5)
+        Coordinates:
+          * x        (x) float64 40B 0.0 0.2 0.4 0.6 0.8
+        Data variables:
+            *empty*
+        Indexes:
+            x        RangeIndex (start=0, stop=1, step=0.2)
+
+        """
+        if stop is None:
+            if start is None:
+                raise TypeError("RangeIndex.arange() requires stop to be specified")
+            else:
+                stop = start
+                start = None
+        if start is None:
+            start = 0.0
+
+        if step is None:
+            step = 1.0
+
+        if coord_name is None:
+            coord_name = dim
+
+        size = math.ceil((stop - start) / step)
+
+        transform = RangeCoordinateTransform(
+            start, stop, size, coord_name, dim, dtype=dtype
+        )
+
+        return cls(transform)
+
+    @classmethod
+    def linspace(
+        cls,
+        start: float,
+        stop: float,
+        num: int = 50,
+        endpoint: bool = True,
+        *,
+        coord_name: Hashable | None = None,
+        dim: str,
+        dtype: Any = None,
+    ) -> "RangeIndex":
+        """Create a new RangeIndex from given start / stop values and number of
+        values.
+
+        Parameters
+        ----------
+        start : float
+            Start of interval. The interval includes this value.
+        stop : float
+            End of interval. The interval includes this value if ``endpoint=True``.
+        num : int, optional
+            Number of values in the interval, i.e., dimension size (default: 50).
+        endpoint : bool, optional
+            If True (default), the ``stop`` value is included in the interval.
+        coord_name : Hashable, optional
+            Name of the (lazy) coordinate variable that will be created and
+            associated with the new index. If ``None``, the coordinate is named
+            as the dimension name.
+        dim : str
+            Dimension name.
+        dtype : dtype, optional
+            The dtype of the coordinate variable (default: float64).
+
+        Examples
+        --------
+        >>> from xarray.indexes import RangeIndex
+
+        >>> index = RangeIndex.linspace(0.0, 1.0, 5, dim="x")
+        >>> ds = xr.Dataset(coords=xr.Coordinates.from_xindex(index))
+
+        >>> ds
+        <xarray.Dataset> Size: 40B
+        Dimensions:  (x: 5)
+        Coordinates:
+          * x        (x) float64 40B 0.0 0.25 0.5 0.75 1.0
+        Data variables:
+            *empty*
+        Indexes:
+            x        RangeIndex (start=0, stop=1.25, step=0.25)
+
+        """
+        if coord_name is None:
+            coord_name = dim
+
+        if endpoint:
+            stop += (stop - start) / (num - 1)
+
+        transform = RangeCoordinateTransform(
+            start, stop, num, coord_name, dim, dtype=dtype
+        )
+
+        return cls(transform)
+
+    @classmethod
+    def from_variables(
+        cls,
+        variables: Mapping[Any, Variable],
+        *,
+        options: Mapping[str, Any],
+    ) -> "RangeIndex":
+        raise NotImplementedError(
+            "cannot create a new RangeIndex from an existing coordinate. Use instead "
+            "either `RangeIndex.arange()` or `RangeIndex.linspace()` together with "
+            "`Coordinates.from_xindex()`"
+        )
+
+    @property
+    def start(self) -> float:
+        """Returns the start of the interval (the interval includes this value)."""
+        return self.transform.start
+
+    @property
+    def stop(self) -> float:
+        """Returns the end of the interval (the interval does not include this value)."""
+        return self.transform.stop
+
+    @property
+    def step(self) -> float:
+        """Returns the spacing between values."""
+        return self.transform.step
+
+    @property
+    def coord_name(self) -> Hashable:
+        return self.transform.coord_names[0]
+
+    @property
+    def dim(self) -> str:
+        return self.transform.dims[0]
+
+    @property
+    def size(self) -> int:
+        return self.transform.dim_size[self.dim]
+
+    def isel(
+        self, indexers: Mapping[Any, int | slice | np.ndarray | Variable]
+    ) -> Index | None:
+        idxer = indexers[self.dim]
+
+        if isinstance(idxer, slice):
+            return RangeIndex(self.transform.slice(idxer))
+        elif (isinstance(idxer, Variable) and idxer.ndim > 1) or duck_array_ops.ndim(
+            idxer
+        ) == 0:
+            return None
+        else:
+            values = self.transform.forward({self.dim: np.asarray(idxer)})[
+                self.coord_name
+            ]
+            if isinstance(idxer, Variable):
+                new_dim = idxer.dims[0]
+            else:
+                new_dim = self.dim
+            pd_index = pd.Index(values, name=self.coord_name)
+            return PandasIndex(pd_index, new_dim, coord_dtype=values.dtype)
+
+    def sel(
+        self, labels: dict[Any, Any], method=None, tolerance=None
+    ) -> IndexSelResult:
+        label = labels[self.dim]
+
+        if method != "nearest":
+            raise ValueError("RangeIndex only supports selection with method='nearest'")
+
+        # TODO: for RangeIndex it might not be too hard to support tolerance
+        if tolerance is not None:
+            raise ValueError(
+                "RangeIndex doesn't support selection with a given tolerance value yet"
+            )
+
+        if isinstance(label, slice):
+            if label.step is None:
+                # continuous interval slice indexing (preserves the index)
+                positions = self.transform.reverse(
+                    {self.coord_name: np.array([label.start, label.stop])}
+                )
+                pos = np.round(positions[self.dim]).astype("int")
+                new_start = max(pos[0], 0)
+                new_stop = min(pos[1], self.size)
+                return IndexSelResult({self.dim: slice(new_start, new_stop)})
+            else:
+                # otherwise convert to basic (array) indexing
+                label = np.arange(label.start, label.stop, label.step)
+
+        # support basic indexing (in the 1D case, basic and vectorized indexing
+        # behave essentially the same)
+        unwrap_xr = False
+        if not isinstance(label, Variable | DataArray):
+            # basic indexing -> either scalar or 1-d array
+            try:
+                var = Variable("_", label)
+            except ValueError:
+                var = Variable((), label)
+            labels = {self.dim: var}
+            unwrap_xr = True
+
+        result = super().sel(labels, method=method, tolerance=tolerance)
+
+        if unwrap_xr:
+            dim_indexers = {self.dim: result.dim_indexers[self.dim].values}
+            result = IndexSelResult(dim_indexers)
+
+        return result
+
+    def to_pandas_index(self) -> pd.Index:
+        values = self.transform.generate_coords()
+        return pd.Index(values[self.dim])
+
+    def _repr_inline_(self, max_width) -> str:
+        params_fmt = (
+            f"start={self.start:.3g}, stop={self.stop:.3g}, step={self.step:.3g}"
+        )
+        return f"{self.__class__.__name__} ({params_fmt})"
+
+    def __repr__(self) -> str:
+        params_fmt = (
+            f"start={self.start:.3g}, stop={self.stop:.3g}, step={self.step:.3g}, "
+            f"size={self.size}, coord_name={self.coord_name!r}, dim={self.dim!r}"
+        )
+        return f"{self.__class__.__name__} ({params_fmt})"
diff -pruN 2025.03.1-8/xarray/namedarray/_aggregations.py 2025.10.1-1/xarray/namedarray/_aggregations.py
--- 2025.03.1-8/xarray/namedarray/_aggregations.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/namedarray/_aggregations.py	2025-10-10 10:38:05.000000000 +0000
@@ -352,10 +352,6 @@ class NamedArrayAggregations:
         :ref:`agg`
             User guide on reduction or aggregation operations.
 
-        Notes
-        -----
-        Non-numeric variables will be removed prior to reducing.
-
         Examples
         --------
         >>> from xarray.namedarray.core import NamedArray
diff -pruN 2025.03.1-8/xarray/namedarray/_typing.py 2025.10.1-1/xarray/namedarray/_typing.py
--- 2025.03.1-8/xarray/namedarray/_typing.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/namedarray/_typing.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,6 +1,5 @@
 from __future__ import annotations
 
-import sys
 from collections.abc import Callable, Hashable, Iterable, Mapping, Sequence
 from enum import Enum
 from types import EllipsisType, ModuleType
@@ -20,10 +19,7 @@ from typing import (
 import numpy as np
 
 try:
-    if sys.version_info >= (3, 11):
-        from typing import TypeAlias
-    else:
-        from typing import TypeAlias
+    from typing import TypeAlias
 except ImportError:
     if TYPE_CHECKING:
         raise
@@ -39,7 +35,6 @@ class Default(Enum):
 _default = Default.token
 
 # https://stackoverflow.com/questions/74633074/how-to-type-hint-a-generic-numpy-array
-_T = TypeVar("_T")
 _T_co = TypeVar("_T_co", covariant=True)
 
 _dtype = np.dtype
@@ -79,7 +74,7 @@ _Chunks = tuple[_Shape, ...]
 _NormalizedChunks = tuple[tuple[int, ...], ...]
 # FYI in some cases we don't allow `None`, which this doesn't take account of.
 # # FYI the `str` is for a size string, e.g. "16MB", supported by dask.
-T_ChunkDim: TypeAlias = str | int | Literal["auto"] | None | tuple[int, ...]
+T_ChunkDim: TypeAlias = str | int | Literal["auto"] | tuple[int, ...] | None  # noqa: PYI051
 # We allow the tuple form of this (though arguably we could transition to named dims only)
 T_Chunks: TypeAlias = T_ChunkDim | Mapping[Any, T_ChunkDim]
 
diff -pruN 2025.03.1-8/xarray/namedarray/core.py 2025.10.1-1/xarray/namedarray/core.py
--- 2025.03.1-8/xarray/namedarray/core.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/namedarray/core.py	2025-10-10 10:38:05.000000000 +0000
@@ -2,9 +2,9 @@ from __future__ import annotations
 
 import copy
 import math
-import sys
 import warnings
 from collections.abc import Callable, Hashable, Iterable, Mapping, Sequence
+from itertools import starmap
 from types import EllipsisType
 from typing import (
     TYPE_CHECKING,
@@ -85,10 +85,7 @@ if TYPE_CHECKING:
         PostComputeCallable: Any  # type: ignore[no-redef]
         PostPersistCallable: Any  # type: ignore[no-redef]
 
-    if sys.version_info >= (3, 11):
-        from typing import Self
-    else:
-        from typing_extensions import Self
+    from typing import Self
 
     T_NamedArray = TypeVar("T_NamedArray", bound="_NamedArray[Any]")
     T_NamedArrayInteger = TypeVar(
@@ -834,6 +831,7 @@ class NamedArray(NamedArrayAggregations,
         if chunkmanager.is_chunked_array(data_old):
             data_chunked = chunkmanager.rechunk(data_old, chunks)  # type: ignore[arg-type]
         else:
+            ndata: duckarray[Any, Any]
             if not isinstance(data_old, ExplicitlyIndexed):
                 ndata = data_old
             else:
@@ -848,7 +846,7 @@ class NamedArray(NamedArrayAggregations,
                 ndata = ImplicitToExplicitIndexingAdapter(data_old, OuterIndexer)  # type: ignore[assignment]
 
             if is_dict_like(chunks):
-                chunks = tuple(chunks.get(n, s) for n, s in enumerate(ndata.shape))
+                chunks = tuple(starmap(chunks.get, enumerate(ndata.shape)))
 
             data_chunked = chunkmanager.from_array(ndata, chunks, **from_array_kwargs)  # type: ignore[arg-type]
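
The chunks normalization above swaps a generator expression for
itertools.starmap; both forms call chunks.get(axis, size) for each axis and
produce the same tuple, e.g. (standalone illustration with made-up values):

    from itertools import starmap

    chunks = {0: 2}    # requested chunk size per axis number
    shape = (4, 6)     # array shape, i.e. the fallback chunk sizes

    old = tuple(chunks.get(n, s) for n, s in enumerate(shape))
    new = tuple(starmap(chunks.get, enumerate(shape)))
    assert old == new == (2, 6)
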
 
diff -pruN 2025.03.1-8/xarray/namedarray/dtypes.py 2025.10.1-1/xarray/namedarray/dtypes.py
--- 2025.03.1-8/xarray/namedarray/dtypes.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/namedarray/dtypes.py	2025-10-10 10:38:05.000000000 +0000
@@ -13,19 +13,19 @@ NA = utils.ReprObject("<NA>")
 
 @functools.total_ordering
 class AlwaysGreaterThan:
-    def __gt__(self, other: Any) -> Literal[True]:
+    def __gt__(self, other: object) -> Literal[True]:
         return True
 
-    def __eq__(self, other: Any) -> bool:
+    def __eq__(self, other: object) -> bool:
         return isinstance(other, type(self))
 
 
 @functools.total_ordering
 class AlwaysLessThan:
-    def __lt__(self, other: Any) -> Literal[True]:
+    def __lt__(self, other: object) -> Literal[True]:
         return True
 
-    def __eq__(self, other: Any) -> bool:
+    def __eq__(self, other: object) -> bool:
         return isinstance(other, type(self))
 
 
@@ -165,7 +165,7 @@ def is_datetime_like(
 
 
 def result_type(
-    *arrays_and_dtypes: np.typing.ArrayLike | np.typing.DTypeLike,
+    *arrays_and_dtypes: np.typing.ArrayLike | np.typing.DTypeLike | None,
 ) -> np.dtype[np.generic]:
     """Like np.result_type, but with type promotion rules matching pandas.
 
diff -pruN 2025.03.1-8/xarray/namedarray/pycompat.py 2025.10.1-1/xarray/namedarray/pycompat.py
--- 2025.03.1-8/xarray/namedarray/pycompat.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/namedarray/pycompat.py	2025-10-10 10:38:05.000000000 +0000
@@ -145,3 +145,17 @@ def to_duck_array(data: Any, **kwargs: d
         return data
     else:
         return np.asarray(data)  # type: ignore[return-value]
+
+
+async def async_to_duck_array(
+    data: Any, **kwargs: dict[str, Any]
+) -> duckarray[_ShapeType, _DType]:
+    from xarray.core.indexing import (
+        ExplicitlyIndexed,
+        ImplicitToExplicitIndexingAdapter,
+    )
+
+    if isinstance(data, ExplicitlyIndexed | ImplicitToExplicitIndexingAdapter):
+        return await data.async_get_duck_array()  # type: ignore[union-attr, no-any-return]
+    else:
+        return to_duck_array(data, **kwargs)
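
The new async_to_duck_array() helper awaits async_get_duck_array() only for
lazily indexed inputs and otherwise defers to the synchronous to_duck_array()
path. A hedged sketch of that fallback path (illustrative only):

    import asyncio

    import numpy as np
    from xarray.namedarray.pycompat import async_to_duck_array

    async def main() -> None:
        # A plain ndarray is not ExplicitlyIndexed, so nothing is awaited and
        # the synchronous to_duck_array() result is returned as-is.
        arr = await async_to_duck_array(np.arange(4))
        print(type(arr))    # <class 'numpy.ndarray'>

    asyncio.run(main())
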
diff -pruN 2025.03.1-8/xarray/plot/dataarray_plot.py 2025.10.1-1/xarray/plot/dataarray_plot.py
--- 2025.03.1-8/xarray/plot/dataarray_plot.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/plot/dataarray_plot.py	2025-10-10 10:38:05.000000000 +0000
@@ -196,7 +196,13 @@ def _prepare_plot1d_data(
                 dim = coords_to_plot.get(v, None)
                 if (dim is not None) and (dim in darray.dims):
                     darray_nan = np.nan * darray.isel({dim: -1})
-                    darray = concat([darray, darray_nan], dim=dim)
+                    darray = concat(
+                        [darray, darray_nan],
+                        dim=dim,
+                        coords="minimal",
+                        compat="override",
+                        join="exact",
+                    )
                     dims_T.append(coords_to_plot[v])
 
         # Lines should never connect to the same coordinate when stacked,
@@ -524,7 +530,7 @@ def line(
         assert hueplt is not None
         ax.legend(handles=primitive, labels=list(hueplt.to_numpy()), title=hue_label)
 
-    if np.issubdtype(xplt.dtype, np.datetime64):
+    if isinstance(xplt.dtype, np.dtype) and np.issubdtype(xplt.dtype, np.datetime64):  # type: ignore[redundant-expr]
         _set_concise_date(ax, axis="x")
 
     _update_axes(ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim)
@@ -697,8 +703,8 @@ def hist(
 
     ax = get_axis(figsize, size, aspect, ax)
 
-    no_nan = np.ravel(darray.to_numpy())
-    no_nan = no_nan[pd.notnull(no_nan)]
+    no_nan_arr = np.ravel(darray.to_numpy())
+    no_nan = no_nan_arr[pd.notnull(no_nan_arr)]
 
     n, bins, patches = cast(
         tuple[np.ndarray, np.ndarray, Union["BarContainer", "Polygon"]],
@@ -716,115 +722,115 @@ def hist(
 def _plot1d(plotfunc):
     """Decorator for common 1d plotting logic."""
     commondoc = """
-    Parameters
-    ----------
-    darray : DataArray
-        Must be 2 dimensional, unless creating faceted plots.
-    x : Hashable or None, optional
-        Coordinate for x axis. If None use darray.dims[1].
-    y : Hashable or None, optional
-        Coordinate for y axis. If None use darray.dims[0].
-    z : Hashable or None, optional
-        If specified plot 3D and use this coordinate for *z* axis.
-    hue : Hashable or None, optional
-        Dimension or coordinate for which you want multiple lines plotted.
-    markersize: Hashable or None, optional
-        scatter only. Variable by which to vary size of scattered points.
-    linewidth: Hashable or None, optional
-        Variable by which to vary linewidth.
-    row : Hashable, optional
-        If passed, make row faceted plots on this dimension name.
-    col : Hashable, optional
-        If passed, make column faceted plots on this dimension name.
-    col_wrap : int, optional
-        Use together with ``col`` to wrap faceted plots
-    ax : matplotlib axes object, optional
-        If None, uses the current axis. Not applicable when using facets.
-    figsize : Iterable[float] or None, optional
-        A tuple (width, height) of the figure in inches.
-        Mutually exclusive with ``size`` and ``ax``.
-    size : scalar, optional
-        If provided, create a new figure for the plot with the given size.
-        Height (in inches) of each plot. See also: ``aspect``.
-    aspect : "auto", "equal", scalar or None, optional
-        Aspect ratio of plot, so that ``aspect * size`` gives the width in
-        inches. Only used if a ``size`` is provided.
-    xincrease : bool or None, default: True
-        Should the values on the x axes be increasing from left to right?
-        if None, use the default for the matplotlib function.
-    yincrease : bool or None, default: True
-        Should the values on the y axes be increasing from top to bottom?
-        if None, use the default for the matplotlib function.
-    add_legend : bool or None, optional
-        If True use xarray metadata to add a legend.
-    add_colorbar : bool or None, optional
-        If True add a colorbar.
-    add_labels : bool or None, optional
-        If True use xarray metadata to label axes
-    add_title : bool or None, optional
-        If True use xarray metadata to add a title
-    subplot_kws : dict, optional
-        Dictionary of keyword arguments for matplotlib subplots. Only applies
-        to FacetGrid plotting.
-    xscale : {'linear', 'symlog', 'log', 'logit'} or None, optional
-        Specifies scaling for the x-axes.
-    yscale : {'linear', 'symlog', 'log', 'logit'} or None, optional
-        Specifies scaling for the y-axes.
-    xticks : ArrayLike or None, optional
-        Specify tick locations for x-axes.
-    yticks : ArrayLike or None, optional
-        Specify tick locations for y-axes.
-    xlim : tuple[float, float] or None, optional
-        Specify x-axes limits.
-    ylim : tuple[float, float] or None, optional
-        Specify y-axes limits.
-    cmap : matplotlib colormap name or colormap, optional
-        The mapping from data values to color space. Either a
-        Matplotlib colormap name or object. If not provided, this will
-        be either ``'viridis'`` (if the function infers a sequential
-        dataset) or ``'RdBu_r'`` (if the function infers a diverging
-        dataset).
-        See :doc:`Choosing Colormaps in Matplotlib <matplotlib:users/explain/colors/colormaps>`
-        for more information.
-
-        If *seaborn* is installed, ``cmap`` may also be a
-        `seaborn color palette <https://seaborn.pydata.org/tutorial/color_palettes.html>`_.
-        Note: if ``cmap`` is a seaborn color palette,
-        ``levels`` must also be specified.
-    vmin : float or None, optional
-        Lower value to anchor the colormap, otherwise it is inferred from the
-        data and other keyword arguments. When a diverging dataset is inferred,
-        setting `vmin` or `vmax` will fix the other by symmetry around
-        ``center``. Setting both values prevents use of a diverging colormap.
-        If discrete levels are provided as an explicit list, both of these
-        values are ignored.
-    vmax : float or None, optional
-        Upper value to anchor the colormap, otherwise it is inferred from the
-        data and other keyword arguments. When a diverging dataset is inferred,
-        setting `vmin` or `vmax` will fix the other by symmetry around
-        ``center``. Setting both values prevents use of a diverging colormap.
-        If discrete levels are provided as an explicit list, both of these
-        values are ignored.
-    norm : matplotlib.colors.Normalize, optional
-        If ``norm`` has ``vmin`` or ``vmax`` specified, the corresponding
-        kwarg must be ``None``.
-    extend : {'neither', 'both', 'min', 'max'}, optional
-        How to draw arrows extending the colorbar beyond its limits. If not
-        provided, ``extend`` is inferred from ``vmin``, ``vmax`` and the data limits.
-    levels : int or array-like, optional
-        Split the colormap (``cmap``) into discrete color intervals. If an integer
-        is provided, "nice" levels are chosen based on the data range: this can
-        imply that the final number of levels is not exactly the expected one.
-        Setting ``vmin`` and/or ``vmax`` with ``levels=N`` is equivalent to
-        setting ``levels=np.linspace(vmin, vmax, N)``.
-    **kwargs : optional
-        Additional arguments to wrapped matplotlib function
-
-    Returns
-    -------
-    artist :
-        The same type of primitive artist that the wrapped matplotlib
-        function returns
+Parameters
+----------
+darray : DataArray
+    Must be 2 dimensional, unless creating faceted plots.
+x : Hashable or None, optional
+    Coordinate for x axis. If None use darray.dims[1].
+y : Hashable or None, optional
+    Coordinate for y axis. If None use darray.dims[0].
+z : Hashable or None, optional
+    If specified plot 3D and use this coordinate for *z* axis.
+hue : Hashable or None, optional
+    Dimension or coordinate for which you want multiple lines plotted.
+markersize: Hashable or None, optional
+    scatter only. Variable by which to vary size of scattered points.
+linewidth: Hashable or None, optional
+    Variable by which to vary linewidth.
+row : Hashable, optional
+    If passed, make row faceted plots on this dimension name.
+col : Hashable, optional
+    If passed, make column faceted plots on this dimension name.
+col_wrap : int, optional
+    Use together with ``col`` to wrap faceted plots
+ax : matplotlib axes object, optional
+    If None, uses the current axis. Not applicable when using facets.
+figsize : Iterable[float] or None, optional
+    A tuple (width, height) of the figure in inches.
+    Mutually exclusive with ``size`` and ``ax``.
+size : scalar, optional
+    If provided, create a new figure for the plot with the given size.
+    Height (in inches) of each plot. See also: ``aspect``.
+aspect : "auto", "equal", scalar or None, optional
+    Aspect ratio of plot, so that ``aspect * size`` gives the width in
+    inches. Only used if a ``size`` is provided.
+xincrease : bool or None, default: True
+    Should the values on the x axes be increasing from left to right?
+    if None, use the default for the matplotlib function.
+yincrease : bool or None, default: True
+    Should the values on the y axes be increasing from top to bottom?
+    if None, use the default for the matplotlib function.
+add_legend : bool or None, optional
+    If True use xarray metadata to add a legend.
+add_colorbar : bool or None, optional
+    If True add a colorbar.
+add_labels : bool or None, optional
+    If True use xarray metadata to label axes
+add_title : bool or None, optional
+    If True use xarray metadata to add a title
+subplot_kws : dict, optional
+    Dictionary of keyword arguments for matplotlib subplots. Only applies
+    to FacetGrid plotting.
+xscale : {'linear', 'symlog', 'log', 'logit'} or None, optional
+    Specifies scaling for the x-axes.
+yscale : {'linear', 'symlog', 'log', 'logit'} or None, optional
+    Specifies scaling for the y-axes.
+xticks : ArrayLike or None, optional
+    Specify tick locations for x-axes.
+yticks : ArrayLike or None, optional
+    Specify tick locations for y-axes.
+xlim : tuple[float, float] or None, optional
+    Specify x-axes limits.
+ylim : tuple[float, float] or None, optional
+    Specify y-axes limits.
+cmap : matplotlib colormap name or colormap, optional
+    The mapping from data values to color space. Either a
+    Matplotlib colormap name or object. If not provided, this will
+    be either ``'viridis'`` (if the function infers a sequential
+    dataset) or ``'RdBu_r'`` (if the function infers a diverging
+    dataset).
+    See :doc:`Choosing Colormaps in Matplotlib <matplotlib:users/explain/colors/colormaps>`
+    for more information.
+
+    If *seaborn* is installed, ``cmap`` may also be a
+    `seaborn color palette <https://seaborn.pydata.org/tutorial/color_palettes.html>`_.
+    Note: if ``cmap`` is a seaborn color palette,
+    ``levels`` must also be specified.
+vmin : float or None, optional
+    Lower value to anchor the colormap, otherwise it is inferred from the
+    data and other keyword arguments. When a diverging dataset is inferred,
+    setting `vmin` or `vmax` will fix the other by symmetry around
+    ``center``. Setting both values prevents use of a diverging colormap.
+    If discrete levels are provided as an explicit list, both of these
+    values are ignored.
+vmax : float or None, optional
+    Upper value to anchor the colormap, otherwise it is inferred from the
+    data and other keyword arguments. When a diverging dataset is inferred,
+    setting `vmin` or `vmax` will fix the other by symmetry around
+    ``center``. Setting both values prevents use of a diverging colormap.
+    If discrete levels are provided as an explicit list, both of these
+    values are ignored.
+norm : matplotlib.colors.Normalize, optional
+    If ``norm`` has ``vmin`` or ``vmax`` specified, the corresponding
+    kwarg must be ``None``.
+extend : {'neither', 'both', 'min', 'max'}, optional
+    How to draw arrows extending the colorbar beyond its limits. If not
+    provided, ``extend`` is inferred from ``vmin``, ``vmax`` and the data limits.
+levels : int or array-like, optional
+    Split the colormap (``cmap``) into discrete color intervals. If an integer
+    is provided, "nice" levels are chosen based on the data range: this can
+    imply that the final number of levels is not exactly the expected one.
+    Setting ``vmin`` and/or ``vmax`` with ``levels=N`` is equivalent to
+    setting ``levels=np.linspace(vmin, vmax, N)``.
+**kwargs : optional
+    Additional arguments to wrapped matplotlib function
+
+Returns
+-------
+artist :
+    The same type of primitive artist that the wrapped matplotlib
+    function returns
     """
 
     # Build on the original docstring
@@ -1279,124 +1285,123 @@ def scatter(
 def _plot2d(plotfunc):
     """Decorator for common 2d plotting logic."""
     commondoc = """
-    Parameters
-    ----------
-    darray : DataArray
-        Must be two-dimensional, unless creating faceted plots.
-    x : Hashable or None, optional
-        Coordinate for *x* axis. If ``None``, use ``darray.dims[1]``.
-    y : Hashable or None, optional
-        Coordinate for *y* axis. If ``None``, use ``darray.dims[0]``.
-    figsize : Iterable or float or None, optional
-        A tuple (width, height) of the figure in inches.
-        Mutually exclusive with ``size`` and ``ax``.
-    size : scalar, optional
-        If provided, create a new figure for the plot with the given size:
-        *height* (in inches) of each plot. See also: ``aspect``.
-    aspect : "auto", "equal", scalar or None, optional
-        Aspect ratio of plot, so that ``aspect * size`` gives the *width* in
-        inches. Only used if a ``size`` is provided.
-    ax : matplotlib axes object, optional
-        Axes on which to plot. By default, use the current axes.
-        Mutually exclusive with ``size`` and ``figsize``.
-    row : Hashable or None, optional
-        If passed, make row faceted plots on this dimension name.
-    col : Hashable or None, optional
-        If passed, make column faceted plots on this dimension name.
-    col_wrap : int, optional
-        Use together with ``col`` to wrap faceted plots.
-    xincrease : None, True, or False, optional
-        Should the values on the *x* axis be increasing from left to right?
-        If ``None``, use the default for the Matplotlib function.
-    yincrease : None, True, or False, optional
-        Should the values on the *y* axis be increasing from top to bottom?
-        If ``None``, use the default for the Matplotlib function.
-    add_colorbar : bool, optional
-        Add colorbar to axes.
-    add_labels : bool, optional
-        Use xarray metadata to label axes.
-    vmin : float or None, optional
-        Lower value to anchor the colormap, otherwise it is inferred from the
-        data and other keyword arguments. When a diverging dataset is inferred,
-        setting `vmin` or `vmax` will fix the other by symmetry around
-        ``center``. Setting both values prevents use of a diverging colormap.
-        If discrete levels are provided as an explicit list, both of these
-        values are ignored.
-    vmax : float or None, optional
-        Upper value to anchor the colormap, otherwise it is inferred from the
-        data and other keyword arguments. When a diverging dataset is inferred,
-        setting `vmin` or `vmax` will fix the other by symmetry around
-        ``center``. Setting both values prevents use of a diverging colormap.
-        If discrete levels are provided as an explicit list, both of these
-        values are ignored.
-    cmap : matplotlib colormap name or colormap, optional
-        The mapping from data values to color space. If not provided, this
-        will be either be ``'viridis'`` (if the function infers a sequential
-        dataset) or ``'RdBu_r'`` (if the function infers a diverging dataset).
-        See :doc:`Choosing Colormaps in Matplotlib <matplotlib:users/explain/colors/colormaps>`
-        for more information.
-
-        If *seaborn* is installed, ``cmap`` may also be a
-        `seaborn color palette <https://seaborn.pydata.org/tutorial/color_palettes.html>`_.
-        Note: if ``cmap`` is a seaborn color palette and the plot type
-        is not ``'contour'`` or ``'contourf'``, ``levels`` must also be specified.
-    center : float or False, optional
-        The value at which to center the colormap. Passing this value implies
-        use of a diverging colormap. Setting it to ``False`` prevents use of a
-        diverging colormap.
-    robust : bool, optional
-        If ``True`` and ``vmin`` or ``vmax`` are absent, the colormap range is
-        computed with 2nd and 98th percentiles instead of the extreme values.
-    extend : {'neither', 'both', 'min', 'max'}, optional
-        How to draw arrows extending the colorbar beyond its limits. If not
-        provided, ``extend`` is inferred from ``vmin``, ``vmax`` and the data limits.
-    levels : int or array-like, optional
-        Split the colormap (``cmap``) into discrete color intervals. If an integer
-        is provided, "nice" levels are chosen based on the data range: this can
-        imply that the final number of levels is not exactly the expected one.
-        Setting ``vmin`` and/or ``vmax`` with ``levels=N`` is equivalent to
-        setting ``levels=np.linspace(vmin, vmax, N)``.
-    infer_intervals : bool, optional
-        Only applies to pcolormesh. If ``True``, the coordinate intervals are
-        passed to pcolormesh. If ``False``, the original coordinates are used
-        (this can be useful for certain map projections). The default is to
-        always infer intervals, unless the mesh is irregular and plotted on
-        a map projection.
-    colors : str or array-like of color-like, optional
-        A single color or a sequence of colors. If the plot type is not ``'contour'``
-        or ``'contourf'``, the ``levels`` argument is required.
-    subplot_kws : dict, optional
-        Dictionary of keyword arguments for Matplotlib subplots. Only used
-        for 2D and faceted plots.
-        (see :py:meth:`matplotlib:matplotlib.figure.Figure.add_subplot`).
-    cbar_ax : matplotlib axes object, optional
-        Axes in which to draw the colorbar.
-    cbar_kwargs : dict, optional
-        Dictionary of keyword arguments to pass to the colorbar
-        (see :meth:`matplotlib:matplotlib.figure.Figure.colorbar`).
-    xscale : {'linear', 'symlog', 'log', 'logit'} or None, optional
-        Specifies scaling for the x-axes.
-    yscale : {'linear', 'symlog', 'log', 'logit'} or None, optional
-        Specifies scaling for the y-axes.
-    xticks : ArrayLike or None, optional
-        Specify tick locations for x-axes.
-    yticks : ArrayLike or None, optional
-        Specify tick locations for y-axes.
-    xlim : tuple[float, float] or None, optional
-        Specify x-axes limits.
-    ylim : tuple[float, float] or None, optional
-        Specify y-axes limits.
-    norm : matplotlib.colors.Normalize, optional
-        If ``norm`` has ``vmin`` or ``vmax`` specified, the corresponding
-        kwarg must be ``None``.
-    **kwargs : optional
-        Additional keyword arguments to wrapped Matplotlib function.
-
-    Returns
-    -------
-    artist :
-        The same type of primitive artist that the wrapped Matplotlib
-        function returns.
+Parameters
+----------
+darray : DataArray
+    Must be two-dimensional, unless creating faceted plots.
+x : Hashable or None, optional
+    Coordinate for *x* axis. If ``None``, use ``darray.dims[1]``.
+y : Hashable or None, optional
+    Coordinate for *y* axis. If ``None``, use ``darray.dims[0]``.
+figsize : Iterable or float or None, optional
+    A tuple (width, height) of the figure in inches.
+    Mutually exclusive with ``size`` and ``ax``.
+size : scalar, optional
+    If provided, create a new figure for the plot with the given size:
+    *height* (in inches) of each plot. See also: ``aspect``.
+aspect : "auto", "equal", scalar or None, optional
+    Aspect ratio of plot, so that ``aspect * size`` gives the *width* in
+    inches. Only used if a ``size`` is provided.
+ax : matplotlib axes object, optional
+    Axes on which to plot. By default, use the current axes.
+    Mutually exclusive with ``size`` and ``figsize``.
+row : Hashable or None, optional
+    If passed, make row faceted plots on this dimension name.
+col : Hashable or None, optional
+    If passed, make column faceted plots on this dimension name.
+col_wrap : int, optional
+    Use together with ``col`` to wrap faceted plots.
+xincrease : None, True, or False, optional
+    Should the values on the *x* axis be increasing from left to right?
+    If ``None``, use the default for the Matplotlib function.
+yincrease : None, True, or False, optional
+    Should the values on the *y* axis be increasing from top to bottom?
+    If ``None``, use the default for the Matplotlib function.
+add_colorbar : bool, optional
+    Add colorbar to axes.
+add_labels : bool, optional
+    Use xarray metadata to label axes.
+vmin : float or None, optional
+    Lower value to anchor the colormap, otherwise it is inferred from the
+    data and other keyword arguments. When a diverging dataset is inferred,
+    setting `vmin` or `vmax` will fix the other by symmetry around
+    ``center``. Setting both values prevents use of a diverging colormap.
+    If discrete levels are provided as an explicit list, both of these
+    values are ignored.
+vmax : float or None, optional
+    Upper value to anchor the colormap, otherwise it is inferred from the
+    data and other keyword arguments. When a diverging dataset is inferred,
+    setting `vmin` or `vmax` will fix the other by symmetry around
+    ``center``. Setting both values prevents use of a diverging colormap.
+    If discrete levels are provided as an explicit list, both of these
+    values are ignored.
+cmap : matplotlib colormap name or colormap, optional
+    The mapping from data values to color space. If not provided, this
+    will be either ``'viridis'`` (if the function infers a sequential
+    dataset) or ``'RdBu_r'`` (if the function infers a diverging dataset).
+    See :doc:`Choosing Colormaps in Matplotlib <matplotlib:users/explain/colors/colormaps>`
+    for more information.
+    If *seaborn* is installed, ``cmap`` may also be a
+    `seaborn color palette <https://seaborn.pydata.org/tutorial/color_palettes.html>`_.
+    Note: if ``cmap`` is a seaborn color palette and the plot type
+    is not ``'contour'`` or ``'contourf'``, ``levels`` must also be specified.
+center : float or False, optional
+    The value at which to center the colormap. Passing this value implies
+    use of a diverging colormap. Setting it to ``False`` prevents use of a
+    diverging colormap.
+robust : bool, optional
+    If ``True`` and ``vmin`` or ``vmax`` are absent, the colormap range is
+    computed with 2nd and 98th percentiles instead of the extreme values.
+extend : {'neither', 'both', 'min', 'max'}, optional
+    How to draw arrows extending the colorbar beyond its limits. If not
+    provided, ``extend`` is inferred from ``vmin``, ``vmax`` and the data limits.
+levels : int or array-like, optional
+    Split the colormap (``cmap``) into discrete color intervals. If an integer
+    is provided, "nice" levels are chosen based on the data range: this can
+    imply that the final number of levels is not exactly the expected one.
+    Setting ``vmin`` and/or ``vmax`` with ``levels=N`` is equivalent to
+    setting ``levels=np.linspace(vmin, vmax, N)``.
+infer_intervals : bool, optional
+    Only applies to pcolormesh. If ``True``, the coordinate intervals are
+    passed to pcolormesh. If ``False``, the original coordinates are used
+    (this can be useful for certain map projections). The default is to
+    always infer intervals, unless the mesh is irregular and plotted on
+    a map projection.
+colors : str or array-like of color-like, optional
+    A single color or a sequence of colors. If the plot type is not ``'contour'``
+    or ``'contourf'``, the ``levels`` argument is required.
+subplot_kws : dict, optional
+    Dictionary of keyword arguments for Matplotlib subplots. Only used
+    for 2D and faceted plots.
+    (see :py:meth:`matplotlib:matplotlib.figure.Figure.add_subplot`).
+cbar_ax : matplotlib axes object, optional
+    Axes in which to draw the colorbar.
+cbar_kwargs : dict, optional
+    Dictionary of keyword arguments to pass to the colorbar
+    (see :meth:`matplotlib:matplotlib.figure.Figure.colorbar`).
+xscale : {'linear', 'symlog', 'log', 'logit'} or None, optional
+    Specifies scaling for the x-axes.
+yscale : {'linear', 'symlog', 'log', 'logit'} or None, optional
+    Specifies scaling for the y-axes.
+xticks : ArrayLike or None, optional
+    Specify tick locations for x-axes.
+yticks : ArrayLike or None, optional
+    Specify tick locations for y-axes.
+xlim : tuple[float, float] or None, optional
+    Specify x-axes limits.
+ylim : tuple[float, float] or None, optional
+    Specify y-axes limits.
+norm : matplotlib.colors.Normalize, optional
+    If ``norm`` has ``vmin`` or ``vmax`` specified, the corresponding
+    kwarg must be ``None``.
+**kwargs : optional
+    Additional keyword arguments to wrapped Matplotlib function.
+
+Returns
+-------
+artist :
+    The same type of primitive artist that the wrapped Matplotlib
+    function returns.
     """
 
     # Build on the original docstring
@@ -1480,7 +1485,7 @@ def _plot2d(plotfunc):
         if subplot_kws is None:
             subplot_kws = dict()
 
-        if plotfunc.__name__ == "surface" and not kwargs.get("_is_facetgrid", False):
+        if plotfunc.__name__ == "surface" and not kwargs.get("_is_facetgrid"):
             if ax is None:
                 # TODO: Importing Axes3D is no longer necessary in matplotlib >= 3.2.
                 # Remove when minimum requirement of matplotlib is 3.2:
@@ -1512,7 +1517,7 @@ def _plot2d(plotfunc):
 
         if (
             plotfunc.__name__ == "surface"
-            and not kwargs.get("_is_facetgrid", False)
+            and not kwargs.get("_is_facetgrid")
             and ax is not None
         ):
             import mpl_toolkits
@@ -1589,7 +1594,7 @@ def _plot2d(plotfunc):
             kwargs["levels"] = cmap_params["levels"]
             # if colors == a single color, matplotlib draws dashed negative
             # contours. we lose this feature if we pass cmap and not colors
-            if isinstance(colors, str):
+            if colors is not None:
                 cmap_params["cmap"] = None
                 kwargs["colors"] = colors
 
diff -pruN 2025.03.1-8/xarray/plot/dataset_plot.py 2025.10.1-1/xarray/plot/dataset_plot.py
--- 2025.03.1-8/xarray/plot/dataset_plot.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/plot/dataset_plot.py	2025-10-10 10:38:05.000000000 +0000
@@ -37,119 +37,119 @@ if TYPE_CHECKING:
 
 def _dsplot(plotfunc):
     commondoc = """
-    Parameters
-    ----------
+Parameters
+----------
 
-    ds : Dataset
-    x : Hashable or None, optional
-        Variable name for x-axis.
-    y : Hashable or None, optional
-        Variable name for y-axis.
-    u : Hashable or None, optional
-        Variable name for the *u* velocity (in *x* direction).
-        quiver/streamplot plots only.
-    v : Hashable or None, optional
-        Variable name for the *v* velocity (in *y* direction).
-        quiver/streamplot plots only.
-    hue: Hashable or None, optional
-        Variable by which to color scatter points or arrows.
-    hue_style: {'continuous', 'discrete'} or None, optional
-        How to use the ``hue`` variable:
-
-        - ``'continuous'`` -- continuous color scale
-          (default for numeric ``hue`` variables)
-        - ``'discrete'`` -- a color for each unique value, using the default color cycle
-          (default for non-numeric ``hue`` variables)
-
-    row : Hashable or None, optional
-        If passed, make row faceted plots on this dimension name.
-    col : Hashable or None, optional
-        If passed, make column faceted plots on this dimension name.
-    col_wrap : int, optional
-        Use together with ``col`` to wrap faceted plots.
-    ax : matplotlib axes object or None, optional
-        If ``None``, use the current axes. Not applicable when using facets.
-    figsize : Iterable[float] or None, optional
-        A tuple (width, height) of the figure in inches.
-        Mutually exclusive with ``size`` and ``ax``.
-    size : scalar, optional
-        If provided, create a new figure for the plot with the given size.
-        Height (in inches) of each plot. See also: ``aspect``.
-    aspect : "auto", "equal", scalar or None, optional
-        Aspect ratio of plot, so that ``aspect * size`` gives the width in
-        inches. Only used if a ``size`` is provided.
-    sharex : bool or None, optional
-        If True all subplots share the same x-axis.
-    sharey : bool or None, optional
-        If True all subplots share the same y-axis.
-    add_guide: bool or None, optional
-        Add a guide that depends on ``hue_style``:
-
-        - ``'continuous'`` -- build a colorbar
-        - ``'discrete'`` -- build a legend
-
-    subplot_kws : dict or None, optional
-        Dictionary of keyword arguments for Matplotlib subplots
-        (see :py:meth:`matplotlib:matplotlib.figure.Figure.add_subplot`).
-        Only applies to FacetGrid plotting.
-    cbar_kwargs : dict, optional
-        Dictionary of keyword arguments to pass to the colorbar
-        (see :meth:`matplotlib:matplotlib.figure.Figure.colorbar`).
-    cbar_ax : matplotlib axes object, optional
-        Axes in which to draw the colorbar.
-    cmap : matplotlib colormap name or colormap, optional
-        The mapping from data values to color space. Either a
-        Matplotlib colormap name or object. If not provided, this will
-        be either ``'viridis'`` (if the function infers a sequential
-        dataset) or ``'RdBu_r'`` (if the function infers a diverging
-        dataset).
-        See :doc:`Choosing Colormaps in Matplotlib <matplotlib:users/explain/colors/colormaps>`
-        for more information.
-
-        If *seaborn* is installed, ``cmap`` may also be a
-        `seaborn color palette <https://seaborn.pydata.org/tutorial/color_palettes.html>`_.
-        Note: if ``cmap`` is a seaborn color palette,
-        ``levels`` must also be specified.
-    vmin : float or None, optional
-        Lower value to anchor the colormap, otherwise it is inferred from the
-        data and other keyword arguments. When a diverging dataset is inferred,
-        setting `vmin` or `vmax` will fix the other by symmetry around
-        ``center``. Setting both values prevents use of a diverging colormap.
-        If discrete levels are provided as an explicit list, both of these
-        values are ignored.
-    vmax : float or None, optional
-        Upper value to anchor the colormap, otherwise it is inferred from the
-        data and other keyword arguments. When a diverging dataset is inferred,
-        setting `vmin` or `vmax` will fix the other by symmetry around
-        ``center``. Setting both values prevents use of a diverging colormap.
-        If discrete levels are provided as an explicit list, both of these
-        values are ignored.
-    norm : matplotlib.colors.Normalize, optional
-        If ``norm`` has ``vmin`` or ``vmax`` specified, the corresponding
-        kwarg must be ``None``.
-    infer_intervals: bool | None
-        If True the intervals are inferred.
-    center : float, optional
-        The value at which to center the colormap. Passing this value implies
-        use of a diverging colormap. Setting it to ``False`` prevents use of a
-        diverging colormap.
-    robust : bool, optional
-        If ``True`` and ``vmin`` or ``vmax`` are absent, the colormap range is
-        computed with 2nd and 98th percentiles instead of the extreme values.
-    colors : str or array-like of color-like, optional
-        A single color or a list of colors. The ``levels`` argument
-        is required.
-    extend : {'neither', 'both', 'min', 'max'}, optional
-        How to draw arrows extending the colorbar beyond its limits. If not
-        provided, ``extend`` is inferred from ``vmin``, ``vmax`` and the data limits.
-    levels : int or array-like, optional
-        Split the colormap (``cmap``) into discrete color intervals. If an integer
-        is provided, "nice" levels are chosen based on the data range: this can
-        imply that the final number of levels is not exactly the expected one.
-        Setting ``vmin`` and/or ``vmax`` with ``levels=N`` is equivalent to
-        setting ``levels=np.linspace(vmin, vmax, N)``.
-    **kwargs : optional
-        Additional keyword arguments to wrapped Matplotlib function.
+ds : Dataset
+x : Hashable or None, optional
+    Variable name for x-axis.
+y : Hashable or None, optional
+    Variable name for y-axis.
+u : Hashable or None, optional
+    Variable name for the *u* velocity (in *x* direction).
+    quiver/streamplot plots only.
+v : Hashable or None, optional
+    Variable name for the *v* velocity (in *y* direction).
+    quiver/streamplot plots only.
+hue: Hashable or None, optional
+    Variable by which to color scatter points or arrows.
+hue_style: {'continuous', 'discrete'} or None, optional
+    How to use the ``hue`` variable:
+
+    - ``'continuous'`` -- continuous color scale
+        (default for numeric ``hue`` variables)
+    - ``'discrete'`` -- a color for each unique value, using the default color cycle
+        (default for non-numeric ``hue`` variables)
+
+row : Hashable or None, optional
+    If passed, make row faceted plots on this dimension name.
+col : Hashable or None, optional
+    If passed, make column faceted plots on this dimension name.
+col_wrap : int, optional
+    Use together with ``col`` to wrap faceted plots.
+ax : matplotlib axes object or None, optional
+    If ``None``, use the current axes. Not applicable when using facets.
+figsize : Iterable[float] or None, optional
+    A tuple (width, height) of the figure in inches.
+    Mutually exclusive with ``size`` and ``ax``.
+size : scalar, optional
+    If provided, create a new figure for the plot with the given size.
+    Height (in inches) of each plot. See also: ``aspect``.
+aspect : "auto", "equal", scalar or None, optional
+    Aspect ratio of plot, so that ``aspect * size`` gives the width in
+    inches. Only used if a ``size`` is provided.
+sharex : bool or None, optional
+    If True all subplots share the same x-axis.
+sharey : bool or None, optional
+    If True all subplots share the same y-axis.
+add_guide : bool or None, optional
+    Add a guide that depends on ``hue_style``:
+
+    - ``'continuous'`` -- build a colorbar
+    - ``'discrete'`` -- build a legend
+
+subplot_kws : dict or None, optional
+    Dictionary of keyword arguments for Matplotlib subplots
+    (see :py:meth:`matplotlib:matplotlib.figure.Figure.add_subplot`).
+    Only applies to FacetGrid plotting.
+cbar_kwargs : dict, optional
+    Dictionary of keyword arguments to pass to the colorbar
+    (see :meth:`matplotlib:matplotlib.figure.Figure.colorbar`).
+cbar_ax : matplotlib axes object, optional
+    Axes in which to draw the colorbar.
+cmap : matplotlib colormap name or colormap, optional
+    The mapping from data values to color space. Either a
+    Matplotlib colormap name or object. If not provided, this will
+    be either ``'viridis'`` (if the function infers a sequential
+    dataset) or ``'RdBu_r'`` (if the function infers a diverging
+    dataset).
+    See :doc:`Choosing Colormaps in Matplotlib <matplotlib:users/explain/colors/colormaps>`
+    for more information.
+
+    If *seaborn* is installed, ``cmap`` may also be a
+    `seaborn color palette <https://seaborn.pydata.org/tutorial/color_palettes.html>`_.
+    Note: if ``cmap`` is a seaborn color palette,
+    ``levels`` must also be specified.
+vmin : float or None, optional
+    Lower value to anchor the colormap, otherwise it is inferred from the
+    data and other keyword arguments. When a diverging dataset is inferred,
+    setting `vmin` or `vmax` will fix the other by symmetry around
+    ``center``. Setting both values prevents use of a diverging colormap.
+    If discrete levels are provided as an explicit list, both of these
+    values are ignored.
+vmax : float or None, optional
+    Upper value to anchor the colormap, otherwise it is inferred from the
+    data and other keyword arguments. When a diverging dataset is inferred,
+    setting `vmin` or `vmax` will fix the other by symmetry around
+    ``center``. Setting both values prevents use of a diverging colormap.
+    If discrete levels are provided as an explicit list, both of these
+    values are ignored.
+norm : matplotlib.colors.Normalize, optional
+    If ``norm`` has ``vmin`` or ``vmax`` specified, the corresponding
+    kwarg must be ``None``.
+infer_intervals : bool or None, optional
+    If True, the intervals are inferred.
+center : float, optional
+    The value at which to center the colormap. Passing this value implies
+    use of a diverging colormap. Setting it to ``False`` prevents use of a
+    diverging colormap.
+robust : bool, optional
+    If ``True`` and ``vmin`` or ``vmax`` are absent, the colormap range is
+    computed with 2nd and 98th percentiles instead of the extreme values.
+colors : str or array-like of color-like, optional
+    A single color or a list of colors. The ``levels`` argument
+    is required.
+extend : {'neither', 'both', 'min', 'max'}, optional
+    How to draw arrows extending the colorbar beyond its limits. If not
+    provided, ``extend`` is inferred from ``vmin``, ``vmax`` and the data limits.
+levels : int or array-like, optional
+    Split the colormap (``cmap``) into discrete color intervals. If an integer
+    is provided, "nice" levels are chosen based on the data range: this can
+    imply that the final number of levels is not exactly the expected one.
+    Setting ``vmin`` and/or ``vmax`` with ``levels=N`` is equivalent to
+    setting ``levels=np.linspace(vmin, vmax, N)``.
+**kwargs : optional
+    Additional keyword arguments to wrapped Matplotlib function.
     """
 
     # Build on the original docstring
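
The parameters above form the docstring template shared by the Dataset plotting wrappers such as quiver and streamplot. A minimal sketch of how a few of them combine for a quiver plot, using made-up data and variable names (not part of the patch):

import numpy as np
import xarray as xr

x = np.linspace(0, 2 * np.pi, 25)
y = np.linspace(0, np.pi, 20)
xx, yy = np.meshgrid(x, y)  # both shaped (len(y), len(x))
ds = xr.Dataset(
    {
        "u": (("y", "x"), -np.sin(yy)),  # x-component of the vector field
        "v": (("y", "x"), np.cos(xx)),   # y-component of the vector field
    },
    coords={"x": x, "y": y},
)
ds["speed"] = np.hypot(ds["u"], ds["v"])

# x/y name coordinate variables, u/v the vector components, and hue colors
# the arrows; cmap, robust and cbar_kwargs behave as described above.
ds.plot.quiver(
    x="x", y="y", u="u", v="v", hue="speed",
    cmap="viridis", robust=True, cbar_kwargs={"label": "speed"},
)
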
@@ -630,6 +630,8 @@ def streamplot(
     cmap_params = kwargs.pop("cmap_params")
 
     if hue:
+        if xdim is not None and ydim is not None:
+            ds[hue] = ds[hue].transpose(ydim, xdim)
         kwargs["color"] = ds[hue].values
 
         # TODO: Fix this by always returning a norm with vmin, vmax in cmap_params
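
The added transpose guards against ``ds[hue]`` having its dimensions stored in (x, y) order: Matplotlib's ``streamplot`` expects the ``color`` array to use the same rows-are-y, columns-are-x layout as ``u`` and ``v``. A plain-Matplotlib sketch of that constraint (illustrative only, not xarray code):

import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0, 1, 25)
y = np.linspace(0, 1, 20)
xx, yy = np.meshgrid(x, y)   # both (len(y), len(x)) == (20, 25)
u, v = -yy, xx
speed = np.hypot(u, v)       # must also be (20, 25) to be usable as color

fig, ax = plt.subplots()
ax.streamplot(x, y, u, v, color=speed)      # works: color matches the u/v layout
# ax.streamplot(x, y, u, v, color=speed.T)  # fails: (25, 20) does not match
plt.close(fig)
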
diff -pruN 2025.03.1-8/xarray/plot/facetgrid.py 2025.10.1-1/xarray/plot/facetgrid.py
--- 2025.03.1-8/xarray/plot/facetgrid.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/plot/facetgrid.py	2025-10-10 10:38:05.000000000 +0000
@@ -549,7 +549,7 @@ class FacetGrid(Generic[T_DataArrayOrSet
         )
 
         if add_legend:
-            use_legend_elements = False if func.__name__ == "hist" else True
+            use_legend_elements = func.__name__ != "hist"
             if use_legend_elements:
                 self.add_legend(
                     use_legend_elements=use_legend_elements,
@@ -796,7 +796,7 @@ class FacetGrid(Generic[T_DataArrayOrSet
             # Find the plot with the largest xlim values:
             lower, upper = lims_largest[axis]
             for ax in self.axs.flat:
-                get_lim: None | Callable[[], tuple[float, float]] = getattr(
+                get_lim: Callable[[], tuple[float, float]] | None = getattr(
                     ax, f"get_{axis}lim", None
                 )
                 if get_lim:
@@ -862,15 +862,15 @@ class FacetGrid(Generic[T_DataArrayOrSet
         for ax in axes:
             getattr(ax, f"set_{axis}label")(label, **kwargs)
 
-    def set_xlabels(self, label: None | str = None, **kwargs: Any) -> None:
+    def set_xlabels(self, label: str | None = None, **kwargs: Any) -> None:
         """Label the x axis on the bottom row of the grid."""
         self._set_labels("x", self._bottom_axes, label, **kwargs)
 
-    def set_ylabels(self, label: None | str = None, **kwargs: Any) -> None:
+    def set_ylabels(self, label: str | None = None, **kwargs: Any) -> None:
         """Label the y axis on the left column of the grid."""
         self._set_labels("y", self._left_axes, label, **kwargs)
 
-    def set_zlabels(self, label: None | str = None, **kwargs: Any) -> None:
+    def set_zlabels(self, label: str | None = None, **kwargs: Any) -> None:
         """Label the z axis."""
         self._set_labels("z", self._left_axes, label, **kwargs)
 
@@ -910,14 +910,14 @@ class FacetGrid(Generic[T_DataArrayOrSet
                 # Only label the ones with data
                 if d is not None:
                     coord, value = list(d.items()).pop()
-                    title = nicetitle(coord, value, maxchar=maxchar)
+                    title = nicetitle(coord, value)
                     ax.set_title(title, size=size, **kwargs)
         else:
             # The row titles on the right edge of the grid
             for index, (ax, row_name, handle) in enumerate(
                 zip(self.axs[:, -1], self.row_names, self.row_labels, strict=True)
             ):
-                title = nicetitle(coord=self._row_var, value=row_name, maxchar=maxchar)
+                title = nicetitle(coord=self._row_var, value=row_name)
                 if not handle:
                     self.row_labels[index] = ax.annotate(
                         title,
@@ -936,7 +936,7 @@ class FacetGrid(Generic[T_DataArrayOrSet
             for index, (ax, col_name, handle) in enumerate(
                 zip(self.axs[0, :], self.col_names, self.col_labels, strict=True)
             ):
-                title = nicetitle(coord=self._col_var, value=col_name, maxchar=maxchar)
+                title = nicetitle(coord=self._col_var, value=col_name)
                 if not handle:
                     self.col_labels[index] = ax.set_title(title, size=size, **kwargs)
                 else:
diff -pruN 2025.03.1-8/xarray/plot/utils.py 2025.10.1-1/xarray/plot/utils.py
--- 2025.03.1-8/xarray/plot/utils.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/plot/utils.py	2025-10-10 10:38:05.000000000 +0000
@@ -13,7 +13,7 @@ from collections.abc import (
 )
 from datetime import date, datetime
 from inspect import getfullargspec
-from typing import TYPE_CHECKING, Any, Literal, overload
+from typing import TYPE_CHECKING, Any, Literal, cast, overload
 
 import numpy as np
 import pandas as pd
@@ -248,9 +248,7 @@ def _determine_cmap_params(
             isinstance(levels, Iterable) and levels[0] * levels[-1] < 0
         )
         # kwargs not specific about divergent or not: infer defaults from data
-        divergent = (
-            ((vmin < 0) and (vmax > 0)) or not center_is_none or levels_are_divergent
-        )
+        divergent = (vmin < 0 < vmax) or not center_is_none or levels_are_divergent
     else:
         divergent = False
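
The rewritten condition relies on Python's comparison chaining: ``vmin < 0 < vmax`` evaluates as ``(vmin < 0) and (0 < vmax)``, so the inferred ``divergent`` flag is unchanged. A quick standalone check:

for vmin, vmax in [(-1.0, 2.0), (0.5, 2.0), (-3.0, -1.0), (0.0, 1.0)]:
    assert (vmin < 0 < vmax) == ((vmin < 0) and (vmax > 0))
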
 
@@ -421,9 +419,10 @@ def _infer_xy_labels(
         _assert_valid_xy(darray, x, "x")
         _assert_valid_xy(darray, y, "y")
 
-        if darray._indexes.get(x, 1) is darray._indexes.get(y, 2):
-            if isinstance(darray._indexes[x], PandasMultiIndex):
-                raise ValueError("x and y cannot be levels of the same MultiIndex")
+        if darray._indexes.get(x, 1) is darray._indexes.get(y, 2) and isinstance(
+            darray._indexes[x], PandasMultiIndex
+        ):
+            raise ValueError("x and y cannot be levels of the same MultiIndex")
 
     return x, y
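
The collapsed check still relies on every level of a ``PandasMultiIndex`` resolving to the same underlying index object, which is what the ``is`` comparison detects. A small sketch of that property, with made-up coordinate names:

import numpy as np
import pandas as pd
import xarray as xr

midx = pd.MultiIndex.from_product([[10, 20], [1, 2]], names=["lat", "lon"])
coords = xr.Coordinates.from_pandas_multiindex(midx, "points")
da = xr.DataArray(np.arange(4.0), dims="points", coords=coords)

# Both levels map to one and the same PandasMultiIndex instance, so asking
# for x="lat", y="lon" in a plot would hit the ValueError raised above.
print(da.xindexes["lat"] is da.xindexes["lon"])  # True
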
 
@@ -459,8 +458,6 @@ def get_axis(
     ax: Axes | None = None,
     **subplot_kws: Any,
 ) -> Axes:
-    from xarray.core.utils import attempt_import
-
     if TYPE_CHECKING:
         import matplotlib as mpl
         import matplotlib.pyplot as plt
@@ -798,16 +795,16 @@ def _update_axes(
     """
     if xincrease is None:
         pass
-    elif xincrease and ax.xaxis_inverted():
-        ax.invert_xaxis()
-    elif not xincrease and not ax.xaxis_inverted():
+    elif (xincrease and ax.xaxis_inverted()) or (
+        not xincrease and not ax.xaxis_inverted()
+    ):
         ax.invert_xaxis()
 
     if yincrease is None:
         pass
-    elif yincrease and ax.yaxis_inverted():
-        ax.invert_yaxis()
-    elif not yincrease and not ax.yaxis_inverted():
+    elif (yincrease and ax.yaxis_inverted()) or (
+        not yincrease and not ax.yaxis_inverted()
+    ):
         ax.invert_yaxis()
 
     # The default xscale, yscale needs to be None.
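
The merged branch fires exactly when the requested direction disagrees with the axis' current orientation, i.e. when ``xincrease == ax.xaxis_inverted()``, which is what the two original ``elif`` branches covered between them. A small truth-table check, independent of Matplotlib:

for increase in (True, False):
    for inverted in (True, False):
        merged = (increase and inverted) or (not increase and not inverted)
        assert merged == (increase == inverted)
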
@@ -934,9 +931,6 @@ def _process_cmap_cbar_kwargs(
 
     cbar_kwargs = {} if cbar_kwargs is None else dict(cbar_kwargs)
 
-    if "contour" in func.__name__ and levels is None:
-        levels = 7  # this is the matplotlib default
-
     # colors is mutually exclusive with cmap
     if cmap and colors:
         raise ValueError("Can't specify both cmap and colors.")
@@ -957,7 +951,7 @@ def _process_cmap_cbar_kwargs(
     cmap_kwargs = {
         "plot_data": data,
         "levels": levels,
-        "cmap": colors if colors else cmap,
+        "cmap": colors or cmap,
         "filled": func.__name__ != "contour",
     }
 
@@ -1050,8 +1044,6 @@ def legend_elements(
     labels : list of str
         The string labels for elements of the legend.
     """
-    import warnings
-
     import matplotlib as mpl
 
     mlines = mpl.lines
@@ -1148,7 +1140,7 @@ def legend_elements(
         # Labels are not numerical so modifying label_values is not
         # possible, instead filter the array with nicely distributed
         # indexes:
-        if type(num) == int:  # noqa: E721
+        if type(num) is int:
             loc = mpl.ticker.LinearLocator(num)
         else:
             raise ValueError("`num` only supports integers for non-numeric labels.")
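
Switching from ``type(num) == int`` to ``type(num) is int`` keeps the exact-type check (``bool`` is a subclass of ``int`` and is still rejected here, so ``isinstance`` would change behaviour) while silencing the E721 lint. Illustrative only:

num = True  # bool is a subclass of int
print(type(num) is int)      # False -> rejected, as before
print(isinstance(num, int))  # True  -> would wrongly be accepted
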
@@ -1253,8 +1245,8 @@ def _infer_meta_data(ds, x, y, hue, hue_
             )
 
         if add_guide is None or add_guide is True:
-            add_colorbar = True if hue_style == "continuous" else False
-            add_legend = True if hue_style == "discrete" else False
+            add_colorbar = hue_style == "continuous"
+            add_legend = hue_style == "discrete"
         else:
             add_colorbar = False
             add_legend = False
@@ -1278,16 +1270,15 @@ def _infer_meta_data(ds, x, y, hue, hue_
     else:
         add_quiverkey = False
 
-    if (add_guide or add_guide is None) and funcname == "streamplot":
-        if hue:
-            add_colorbar = True
-            if not hue_style:
-                hue_style = "continuous"
-            elif hue_style != "continuous":
-                raise ValueError(
-                    "hue_style must be 'continuous' or None for .plot.quiver or "
-                    ".plot.streamplot"
-                )
+    if (add_guide or add_guide is None) and funcname == "streamplot" and hue:
+        add_colorbar = True
+        if not hue_style:
+            hue_style = "continuous"
+        elif hue_style != "continuous":
+            raise ValueError(
+                "hue_style must be 'continuous' or None for .plot.quiver or "
+                ".plot.streamplot"
+            )
 
     if hue_style is not None and hue_style not in ["discrete", "continuous"]:
         raise ValueError("hue_style must be either None, 'discrete' or 'continuous'.")
@@ -1329,7 +1320,7 @@ def _parse_size(
 def _parse_size(
     data: DataArray | None,
     norm: tuple[float | None, float | None, bool] | Normalize | None,
-) -> None | pd.Series:
+) -> pd.Series | None:
     import matplotlib as mpl
 
     if data is None:
@@ -1343,7 +1334,7 @@ def _parse_size(
     else:
         levels = numbers = np.sort(np.unique(flatdata))
 
-    min_width, default_width, max_width = _MARKERSIZE_RANGE
+    min_width, _default_width, max_width = _MARKERSIZE_RANGE
     # width_range = min_width, max_width
 
     if norm is None:
@@ -1594,7 +1585,7 @@ class _Normalize(Sequence):
         >>> _Normalize(a).ticks
         array([1, 3, 5])
         """
-        val: None | np.ndarray
+        val: np.ndarray | None
         if self.data_is_numeric:
             val = None
         else:
@@ -1653,13 +1644,13 @@ class _Normalize(Sequence):
         """
         import matplotlib.pyplot as plt
 
-        def _func(x: Any, pos: None | Any = None):
+        def _func(x: Any, pos: Any | None = None):
             return f"{self._lookup_arr([x])[0]}"
 
         return plt.FuncFormatter(_func)
 
     @property
-    def func(self) -> Callable[[Any, None | Any], Any]:
+    def func(self) -> Callable[[Any, Any | None], Any]:
         """
         Return a lambda function that maps self.values elements back to
         the original value as a numpy array. Useful with ax.legend_elements.
@@ -1678,7 +1669,7 @@ class _Normalize(Sequence):
         array([0.5, 3. ])
         """
 
-        def _func(x: Any, pos: None | Any = None):
+        def _func(x: Any, pos: Any | None = None):
             return self._lookup_arr(x)
 
         return _func
@@ -1687,8 +1678,8 @@ class _Normalize(Sequence):
 def _determine_guide(
     hueplt_norm: _Normalize,
     sizeplt_norm: _Normalize,
-    add_colorbar: None | bool = None,
-    add_legend: None | bool = None,
+    add_colorbar: bool | None = None,
+    add_legend: bool | None = None,
     plotfunc_name: str | None = None,
 ) -> tuple[bool, bool]:
     if plotfunc_name == "hist":
@@ -1744,8 +1735,8 @@ def _add_legend(
             # Only save unique values:
             u, ind = np.unique(lbl, return_index=True)
             ind = np.argsort(ind)
-            lbl = u[ind].tolist()
-            hdl = np.array(hdl)[ind].tolist()
+            lbl = cast(list, u[ind].tolist())
+            hdl = cast(list, np.array(hdl)[ind].tolist())
 
             # Add a subtitle:
             hdl, lbl = _legend_add_subtitle(hdl, lbl, label_from_attrs(huesizeplt.data))
diff -pruN 2025.03.1-8/xarray/static/css/style.css 2025.10.1-1/xarray/static/css/style.css
--- 2025.03.1-8/xarray/static/css/style.css	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/static/css/style.css	2025-10-10 10:38:05.000000000 +0000
@@ -1,36 +1,83 @@
-/* CSS stylesheet for displaying xarray objects in jupyterlab.
- *
- */
+/* CSS stylesheet for displaying xarray objects in notebooks */
 
 :root {
-  --xr-font-color0: var(--jp-content-font-color0, rgba(0, 0, 0, 1));
-  --xr-font-color2: var(--jp-content-font-color2, rgba(0, 0, 0, 0.54));
-  --xr-font-color3: var(--jp-content-font-color3, rgba(0, 0, 0, 0.38));
-  --xr-border-color: var(--jp-border-color2, #e0e0e0);
-  --xr-disabled-color: var(--jp-layout-color3, #bdbdbd);
-  --xr-background-color: var(--jp-layout-color0, white);
-  --xr-background-color-row-even: var(--jp-layout-color1, white);
-  --xr-background-color-row-odd: var(--jp-layout-color2, #eeeeee);
+  --xr-font-color0: var(
+    --jp-content-font-color0,
+    var(--pst-color-text-base, rgba(0, 0, 0, 1))
+  );
+  --xr-font-color2: var(
+    --jp-content-font-color2,
+    var(--pst-color-text-base, rgba(0, 0, 0, 0.54))
+  );
+  --xr-font-color3: var(
+    --jp-content-font-color3,
+    var(--pst-color-text-base, rgba(0, 0, 0, 0.38))
+  );
+  --xr-border-color: var(
+    --jp-border-color2,
+    hsl(from var(--pst-color-on-background, white) h s calc(l - 10))
+  );
+  --xr-disabled-color: var(
+    --jp-layout-color3,
+    hsl(from var(--pst-color-on-background, white) h s calc(l - 40))
+  );
+  --xr-background-color: var(
+    --jp-layout-color0,
+    var(--pst-color-on-background, white)
+  );
+  --xr-background-color-row-even: var(
+    --jp-layout-color1,
+    hsl(from var(--pst-color-on-background, white) h s calc(l - 5))
+  );
+  --xr-background-color-row-odd: var(
+    --jp-layout-color2,
+    hsl(from var(--pst-color-on-background, white) h s calc(l - 15))
+  );
 }
 
 html[theme="dark"],
 html[data-theme="dark"],
 body[data-theme="dark"],
 body.vscode-dark {
-  --xr-font-color0: rgba(255, 255, 255, 1);
-  --xr-font-color2: rgba(255, 255, 255, 0.54);
-  --xr-font-color3: rgba(255, 255, 255, 0.38);
-  --xr-border-color: #1f1f1f;
-  --xr-disabled-color: #515151;
-  --xr-background-color: #111111;
-  --xr-background-color-row-even: #111111;
-  --xr-background-color-row-odd: #313131;
+  --xr-font-color0: var(
+    --jp-content-font-color0,
+    var(--pst-color-text-base, rgba(255, 255, 255, 1))
+  );
+  --xr-font-color2: var(
+    --jp-content-font-color2,
+    var(--pst-color-text-base, rgba(255, 255, 255, 0.54))
+  );
+  --xr-font-color3: var(
+    --jp-content-font-color3,
+    var(--pst-color-text-base, rgba(255, 255, 255, 0.38))
+  );
+  --xr-border-color: var(
+    --jp-border-color2,
+    hsl(from var(--pst-color-on-background, #111111) h s calc(l + 10))
+  );
+  --xr-disabled-color: var(
+    --jp-layout-color3,
+    hsl(from var(--pst-color-on-background, #111111) h s calc(l + 40))
+  );
+  --xr-background-color: var(
+    --jp-layout-color0,
+    var(--pst-color-on-background, #111111)
+  );
+  --xr-background-color-row-even: var(
+    --jp-layout-color1,
+    hsl(from var(--pst-color-on-background, #111111) h s calc(l + 5))
+  );
+  --xr-background-color-row-odd: var(
+    --jp-layout-color2,
+    hsl(from var(--pst-color-on-background, #111111) h s calc(l + 15))
+  );
 }
 
 .xr-wrap {
   display: block !important;
   min-width: 300px;
   max-width: 700px;
+  line-height: 1.6;
 }
 
 .xr-text-repr-fallback {
@@ -53,11 +100,18 @@ body.vscode-dark {
 }
 
 .xr-obj-type,
-.xr-array-name {
+.xr-obj-name,
+.xr-group-name {
   margin-left: 2px;
   margin-right: 10px;
 }
 
+.xr-group-name::before {
+  content: "📁";
+  padding-right: 0.3em;
+}
+
+.xr-group-name,
 .xr-obj-type {
   color: var(--xr-font-color2);
 }
@@ -66,6 +120,8 @@ body.vscode-dark {
   padding-left: 0 !important;
   display: grid;
   grid-template-columns: 150px auto auto 1fr 0 20px 0 20px;
+  margin-block-start: 0;
+  margin-block-end: 0;
 }
 
 .xr-section-item {
@@ -76,10 +132,12 @@ body.vscode-dark {
   display: inline-block;
   opacity: 0;
   height: 0;
+  margin: 0;
 }
 
 .xr-section-item input + label {
   color: var(--xr-disabled-color);
+  border: 2px solid transparent !important;
 }
 
 .xr-section-item input:enabled + label {
@@ -88,7 +146,7 @@ body.vscode-dark {
 }
 
 .xr-section-item input:focus + label {
-  border: 2px solid var(--xr-font-color0);
+  border: 2px solid var(--xr-font-color0) !important;
 }
 
 .xr-section-item input:enabled + label:hover {
@@ -133,7 +191,6 @@ body.vscode-dark {
 .xr-section-summary,
 .xr-section-inline-details {
   padding-top: 4px;
-  padding-bottom: 4px;
 }
 
 .xr-section-inline-details {
@@ -143,6 +200,7 @@ body.vscode-dark {
 .xr-section-details {
   display: none;
   grid-column: 1 / -1;
+  margin-top: 4px;
   margin-bottom: 5px;
 }
 
@@ -150,6 +208,32 @@ body.vscode-dark {
   display: contents;
 }
 
+.xr-group-box {
+  display: inline-grid;
+  grid-template-columns: 0px 20px auto;
+  width: 100%;
+}
+
+.xr-group-box-vline {
+  grid-column-start: 1;
+  border-right: 0.2em solid;
+  border-color: var(--xr-border-color);
+  width: 0px;
+}
+
+.xr-group-box-hline {
+  grid-column-start: 2;
+  grid-row-start: 1;
+  height: 1em;
+  width: 20px;
+  border-bottom: 0.2em solid;
+  border-color: var(--xr-border-color);
+}
+
+.xr-group-box-contents {
+  grid-column-start: 3;
+}
+
 .xr-array-wrap {
   grid-column: 1 / -1;
   display: grid;
@@ -220,7 +304,9 @@ body.vscode-dark {
 .xr-var-item label,
 .xr-var-item > .xr-var-name span {
   background-color: var(--xr-background-color-row-even);
+  border-color: var(--xr-background-color-row-odd);
   margin-bottom: 0;
+  padding-top: 2px;
 }
 
 .xr-var-item > .xr-var-name:hover span {
@@ -231,6 +317,7 @@ body.vscode-dark {
 .xr-var-list > li:nth-child(odd) > label,
 .xr-var-list > li:nth-child(odd) > .xr-var-name span {
   background-color: var(--xr-background-color-row-odd);
+  border-color: var(--xr-background-color-row-even);
 }
 
 .xr-var-name {
@@ -280,8 +367,15 @@ body.vscode-dark {
 .xr-var-data,
 .xr-index-data {
   display: none;
-  background-color: var(--xr-background-color) !important;
-  padding-bottom: 5px !important;
+  border-top: 2px dotted var(--xr-background-color);
+  padding-bottom: 20px !important;
+  padding-top: 10px !important;
+}
+
+.xr-var-attrs-in + label,
+.xr-var-data-in + label,
+.xr-index-data-in + label {
+  padding: 0 1px;
 }
 
 .xr-var-attrs-in:checked ~ .xr-var-attrs,
@@ -294,6 +388,12 @@ body.vscode-dark {
   float: right;
 }
 
+.xr-var-data > pre,
+.xr-index-data > pre,
+.xr-var-data > table > tbody > tr {
+  background-color: transparent !important;
+}
+
 .xr-var-name span,
 .xr-var-data,
 .xr-index-name div,
@@ -353,3 +453,11 @@ dl.xr-attrs {
   stroke: currentColor;
   fill: currentColor;
 }
+
+.xr-var-attrs-in:checked + label > .xr-icon-file-text2,
+.xr-var-data-in:checked + label > .xr-icon-database,
+.xr-index-data-in:checked + label > .xr-icon-database {
+  color: var(--xr-font-color0);
+  filter: drop-shadow(1px 1px 5px var(--xr-font-color2));
+  stroke-width: 0.8px;
+}
diff -pruN 2025.03.1-8/xarray/structure/alignment.py 2025.10.1-1/xarray/structure/alignment.py
--- 2025.03.1-8/xarray/structure/alignment.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/structure/alignment.py	2025-10-10 10:38:05.000000000 +0000
@@ -5,7 +5,8 @@ import operator
 from collections import defaultdict
 from collections.abc import Callable, Hashable, Iterable, Mapping
 from contextlib import suppress
-from typing import TYPE_CHECKING, Any, Final, Generic, TypeVar, cast, overload
+from itertools import starmap
+from typing import TYPE_CHECKING, Any, Final, Generic, TypeVar, get_args, overload
 
 import numpy as np
 import pandas as pd
@@ -19,22 +20,26 @@ from xarray.core.indexes import (
     indexes_all_equal,
     safe_cast_to_index,
 )
-from xarray.core.types import T_Alignable
-from xarray.core.utils import is_dict_like, is_full_slice
+from xarray.core.types import JoinOptions, T_Alignable
+from xarray.core.utils import emit_user_level_warning, is_dict_like, is_full_slice
 from xarray.core.variable import Variable, as_compatible_data, calculate_dimensions
+from xarray.util.deprecation_helpers import CombineKwargDefault
 
 if TYPE_CHECKING:
     from xarray.core.dataarray import DataArray
     from xarray.core.dataset import Dataset
     from xarray.core.types import (
         Alignable,
-        JoinOptions,
         T_DataArray,
         T_Dataset,
         T_DuckArray,
     )
 
 
+class AlignmentError(ValueError):
+    """Error class for alignment failures due to incompatible arguments."""
+
+
 def reindex_variables(
     variables: Mapping[Any, Variable],
     dim_pos_indexers: Mapping[Any, Any],
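
Because ``AlignmentError`` subclasses ``ValueError``, code that previously caught ``ValueError`` from alignment failures keeps working unchanged. A minimal sketch with made-up data:

import xarray as xr

x = xr.DataArray([1, 2], dims="a", coords={"a": [0, 1]})
y = xr.DataArray([3, 4], dims="a", coords={"a": [1, 2]})

try:
    xr.align(x, y, join="exact")
except ValueError as err:  # also catches the new AlignmentError subclass
    print(type(err).__name__, "-", err)
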
@@ -90,10 +95,49 @@ def reindex_variables(
     return new_variables
 
 
+def _normalize_indexes(
+    indexes: Mapping[Any, Any | T_DuckArray],
+) -> Indexes:
+    """Normalize the indexes/indexers given for re-indexing or alignment.
+
+    Wrap any arbitrary array or `pandas.Index` as an Xarray `PandasIndex`
+    associated with its corresponding dimension coordinate variable.
+
+    """
+    xr_indexes: dict[Hashable, Index] = {}
+    xr_variables: dict[Hashable, Variable]
+
+    if isinstance(indexes, Indexes):
+        xr_variables = dict(indexes.variables)
+    else:
+        xr_variables = {}
+
+    for k, idx in indexes.items():
+        if not isinstance(idx, Index):
+            if getattr(idx, "dims", (k,)) != (k,):
+                raise AlignmentError(
+                    f"Indexer has dimensions {idx.dims} that are different "
+                    f"from that to be indexed along '{k}'"
+                )
+            data: T_DuckArray = as_compatible_data(idx)
+            pd_idx = safe_cast_to_index(data)
+            if pd_idx.name != k:
+                pd_idx = pd_idx.copy()
+                pd_idx.name = k
+            if isinstance(pd_idx, pd.MultiIndex):
+                idx = PandasMultiIndex(pd_idx, k)
+            else:
+                idx = PandasIndex(pd_idx, k, coord_dtype=data.dtype)
+            xr_variables.update(idx.create_variables())
+        xr_indexes[k] = idx
+
+    return Indexes(xr_indexes, xr_variables)
+
+
 CoordNamesAndDims = tuple[tuple[Hashable, tuple[Hashable, ...]], ...]
 MatchingIndexKey = tuple[CoordNamesAndDims, type[Index]]
-NormalizedIndexes = dict[MatchingIndexKey, Index]
-NormalizedIndexVars = dict[MatchingIndexKey, dict[Hashable, Variable]]
+IndexesToAlign = dict[MatchingIndexKey, Index]
+IndexVarsToAlign = dict[MatchingIndexKey, dict[Hashable, Variable]]
 
 
 class Aligner(Generic[T_Alignable]):
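
Moving the helper to module level does not change what it normalizes: plain arrays, lists and ``pandas.Index`` objects passed as indexers are wrapped into a ``PandasIndex`` keyed on the target dimension. A public-API sketch of that equivalence, with made-up data:

import numpy as np
import pandas as pd
import xarray as xr

ds = xr.Dataset({"t": ("x", [10.0, 20.0, 30.0])}, coords={"x": [0, 1, 2]})

# Both indexer flavours are normalized to the same kind of PandasIndex.
a = ds.reindex(x=np.array([0, 2, 4]))
b = ds.reindex(x=pd.Index([0, 2, 4], name="x"))
print(a.identical(b))  # True
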
@@ -111,8 +155,10 @@ class Aligner(Generic[T_Alignable]):
 
     objects: tuple[T_Alignable, ...]
     results: tuple[T_Alignable, ...]
-    objects_matching_indexes: tuple[dict[MatchingIndexKey, Index], ...]
-    join: str
+    objects_matching_index_vars: tuple[
+        dict[MatchingIndexKey, dict[Hashable, Variable]], ...
+    ]
+    join: JoinOptions | CombineKwargDefault
     exclude_dims: frozenset[Hashable]
     exclude_vars: frozenset[Hashable]
     copy: bool
@@ -125,6 +171,7 @@ class Aligner(Generic[T_Alignable]):
     aligned_indexes: dict[MatchingIndexKey, Index]
     aligned_index_vars: dict[MatchingIndexKey, dict[Hashable, Variable]]
     reindex: dict[MatchingIndexKey, bool]
+    keep_original_indexes: set[MatchingIndexKey]
     reindex_kwargs: dict[str, Any]
     unindexed_dim_sizes: dict[Hashable, set]
     new_indexes: Indexes[Index]
@@ -132,7 +179,7 @@ class Aligner(Generic[T_Alignable]):
     def __init__(
         self,
         objects: Iterable[T_Alignable],
-        join: str = "inner",
+        join: JoinOptions | CombineKwargDefault = "inner",
         indexes: Mapping[Any, Any] | None = None,
         exclude_dims: str | Iterable[Hashable] = frozenset(),
         exclude_vars: Iterable[Hashable] = frozenset(),
@@ -143,9 +190,12 @@ class Aligner(Generic[T_Alignable]):
         sparse: bool = False,
     ):
         self.objects = tuple(objects)
-        self.objects_matching_indexes = ()
+        self.objects_matching_indexes: tuple[Any, ...] = ()
+        self.objects_matching_index_vars = ()
 
-        if join not in ["inner", "outer", "override", "exact", "left", "right"]:
+        if not isinstance(join, CombineKwargDefault) and join not in get_args(
+            JoinOptions
+        ):
             raise ValueError(f"invalid value for join: {join}")
         self.join = join
 
@@ -165,7 +215,9 @@ class Aligner(Generic[T_Alignable]):
 
         if indexes is None:
             indexes = {}
-        self.indexes, self.index_vars = self._normalize_indexes(indexes)
+        self.indexes, self.index_vars = self._collect_indexes(
+            _normalize_indexes(indexes)
+        )
 
         self.all_indexes = {}
         self.all_index_vars = {}
@@ -174,85 +226,85 @@ class Aligner(Generic[T_Alignable]):
         self.aligned_indexes = {}
         self.aligned_index_vars = {}
         self.reindex = {}
+        self.keep_original_indexes = set()
 
         self.results = tuple()
 
-    def _normalize_indexes(
-        self,
-        indexes: Mapping[Any, Any | T_DuckArray],
-    ) -> tuple[NormalizedIndexes, NormalizedIndexVars]:
-        """Normalize the indexes/indexers used for re-indexing or alignment.
-
-        Return dictionaries of xarray Index objects and coordinate variables
-        such that we can group matching indexes based on the dictionary keys.
+    def _collect_indexes(
+        self, indexes: Indexes
+    ) -> tuple[IndexesToAlign, IndexVarsToAlign]:
+        """Collect input and/or object indexes for alignment.
+
+        Return new dictionaries of xarray Index objects and coordinate
+        variables, whose keys are used to later retrieve all the indexes to
+        compare with each other (based on the name and dimensions of their
+        associated coordinate variables as well as the Index type).
 
         """
-        if isinstance(indexes, Indexes):
-            xr_variables = dict(indexes.variables)
-        else:
-            xr_variables = {}
+        collected_indexes = {}
+        collected_index_vars = {}
 
-        xr_indexes: dict[Hashable, Index] = {}
-        for k, idx in indexes.items():
-            if not isinstance(idx, Index):
-                if getattr(idx, "dims", (k,)) != (k,):
-                    raise ValueError(
-                        f"Indexer has dimensions {idx.dims} that are different "
-                        f"from that to be indexed along '{k}'"
-                    )
-                data: T_DuckArray = as_compatible_data(idx)
-                pd_idx = safe_cast_to_index(data)
-                pd_idx.name = k
-                if isinstance(pd_idx, pd.MultiIndex):
-                    idx = PandasMultiIndex(pd_idx, k)
-                else:
-                    idx = PandasIndex(pd_idx, k, coord_dtype=data.dtype)
-                xr_variables.update(idx.create_variables())
-            xr_indexes[k] = idx
-
-        normalized_indexes = {}
-        normalized_index_vars = {}
-        for idx, index_vars in Indexes(xr_indexes, xr_variables).group_by_index():
-            coord_names_and_dims = []
-            all_dims: set[Hashable] = set()
+        for idx, idx_vars in indexes.group_by_index():
+            idx_coord_names_and_dims = []
+            idx_all_dims: set[Hashable] = set()
 
-            for name, var in index_vars.items():
+            for name, var in idx_vars.items():
                 dims = var.dims
-                coord_names_and_dims.append((name, dims))
-                all_dims.update(dims)
+                idx_coord_names_and_dims.append((name, dims))
+                idx_all_dims.update(dims)
 
-            exclude_dims = all_dims & self.exclude_dims
-            if exclude_dims == all_dims:
-                continue
-            elif exclude_dims:
-                excl_dims_str = ", ".join(str(d) for d in exclude_dims)
-                incl_dims_str = ", ".join(str(d) for d in all_dims - exclude_dims)
-                raise ValueError(
-                    f"cannot exclude dimension(s) {excl_dims_str} from alignment because "
-                    "these are used by an index together with non-excluded dimensions "
-                    f"{incl_dims_str}"
-                )
+            key: MatchingIndexKey = (tuple(idx_coord_names_and_dims), type(idx))
 
-            key = (tuple(coord_names_and_dims), type(idx))
-            normalized_indexes[key] = idx
-            normalized_index_vars[key] = index_vars
+            if idx_all_dims:
+                exclude_dims = idx_all_dims & self.exclude_dims
+                if exclude_dims == idx_all_dims:
+                    # Do not collect an index if all the dimensions it uses are
+                    # also excluded from the alignment
+                    continue
+                elif exclude_dims:
+                    # If the dimensions used by index partially overlap with the dimensions
+                    # excluded from alignment, it is possible to check index equality along
+                    # non-excluded dimensions only. However, in this case each of the aligned
+                    # objects must retain (a copy of) their original index. Re-indexing and
+                    # overriding the index are not supported.
+                    if self.join == "override":
+                        excl_dims_str = ", ".join(str(d) for d in exclude_dims)
+                        incl_dims_str = ", ".join(
+                            str(d) for d in idx_all_dims - exclude_dims
+                        )
+                        raise AlignmentError(
+                            f"cannot exclude dimension(s) {excl_dims_str} from alignment "
+                            "with `join='override'` because these are used by an index "
+                            f"together with non-excluded dimensions {incl_dims_str} "
+                            "(cannot safely override the index)."
+                        )
+                    else:
+                        self.keep_original_indexes.add(key)
+
+            collected_indexes[key] = idx
+            collected_index_vars[key] = idx_vars
 
-        return normalized_indexes, normalized_index_vars
+        return collected_indexes, collected_index_vars
 
     def find_matching_indexes(self) -> None:
         all_indexes: dict[MatchingIndexKey, list[Index]]
         all_index_vars: dict[MatchingIndexKey, list[dict[Hashable, Variable]]]
         all_indexes_dim_sizes: dict[MatchingIndexKey, dict[Hashable, set]]
         objects_matching_indexes: list[dict[MatchingIndexKey, Index]]
+        objects_matching_index_vars: list[
+            dict[MatchingIndexKey, dict[Hashable, Variable]]
+        ]
 
         all_indexes = defaultdict(list)
         all_index_vars = defaultdict(list)
         all_indexes_dim_sizes = defaultdict(lambda: defaultdict(set))
         objects_matching_indexes = []
+        objects_matching_index_vars = []
 
         for obj in self.objects:
-            obj_indexes, obj_index_vars = self._normalize_indexes(obj.xindexes)
+            obj_indexes, obj_index_vars = self._collect_indexes(obj.xindexes)
             objects_matching_indexes.append(obj_indexes)
+            objects_matching_index_vars.append(obj_index_vars)
             for key, idx in obj_indexes.items():
                 all_indexes[key].append(idx)
             for key, index_vars in obj_index_vars.items():
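
The ``exclude_dims`` bookkeeping above backs the public ``exclude`` argument of ``xr.align``: excluded dimensions are left untouched (their labels and sizes may differ between objects), while the remaining dimensions are joined as usual. A short sketch with made-up data:

import xarray as xr

a = xr.DataArray([[1, 2], [3, 4]], dims=("x", "y"), coords={"x": [0, 1], "y": [10, 20]})
b = xr.DataArray([[5, 6, 7]], dims=("x", "y"), coords={"x": [0], "y": [10, 20, 30]})

# "y" is excluded, so its differing labels/sizes are kept as-is;
# only "x" is intersected by the inner join.
a2, b2 = xr.align(a, b, join="inner", exclude="y")
print(dict(a2.sizes), dict(b2.sizes))  # {'x': 1, 'y': 2} {'x': 1, 'y': 3}
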
@@ -261,6 +313,7 @@ class Aligner(Generic[T_Alignable]):
                     all_indexes_dim_sizes[key][dim].add(size)
 
         self.objects_matching_indexes = tuple(objects_matching_indexes)
+        self.objects_matching_index_vars = tuple(objects_matching_index_vars)
         self.all_indexes = all_indexes
         self.all_index_vars = all_index_vars
 
@@ -268,7 +321,7 @@ class Aligner(Generic[T_Alignable]):
             for dim_sizes in all_indexes_dim_sizes.values():
                 for dim, sizes in dim_sizes.items():
                     if len(sizes) > 1:
-                        raise ValueError(
+                        raise AlignmentError(
                             "cannot align objects with join='override' with matching indexes "
                             f"along dimension {dim!r} that don't have the same size"
                         )
@@ -283,47 +336,6 @@ class Aligner(Generic[T_Alignable]):
 
         self.unindexed_dim_sizes = unindexed_dim_sizes
 
-    def assert_no_index_conflict(self) -> None:
-        """Check for uniqueness of both coordinate and dimension names across all sets
-        of matching indexes.
-
-        We need to make sure that all indexes used for re-indexing or alignment
-        are fully compatible and do not conflict each other.
-
-        Note: perhaps we could choose less restrictive constraints and instead
-        check for conflicts among the dimension (position) indexers returned by
-        `Index.reindex_like()` for each matching pair of object index / aligned
-        index?
-        (ref: https://github.com/pydata/xarray/issues/1603#issuecomment-442965602)
-
-        """
-        matching_keys = set(self.all_indexes) | set(self.indexes)
-
-        coord_count: dict[Hashable, int] = defaultdict(int)
-        dim_count: dict[Hashable, int] = defaultdict(int)
-        for coord_names_dims, _ in matching_keys:
-            dims_set: set[Hashable] = set()
-            for name, dims in coord_names_dims:
-                coord_count[name] += 1
-                dims_set.update(dims)
-            for dim in dims_set:
-                dim_count[dim] += 1
-
-        for count, msg in [(coord_count, "coordinates"), (dim_count, "dimensions")]:
-            dup = {k: v for k, v in count.items() if v > 1}
-            if dup:
-                items_msg = ", ".join(
-                    f"{k!r} ({v} conflicting indexes)" for k, v in dup.items()
-                )
-                raise ValueError(
-                    "cannot re-index or align objects with conflicting indexes found for "
-                    f"the following {msg}: {items_msg}\n"
-                    "Conflicting indexes may occur when\n"
-                    "- they relate to different sets of coordinate and/or dimension names\n"
-                    "- they don't have the same type\n"
-                    "- they may be used to reindex data along common dimensions"
-                )
-
     def _need_reindex(self, dim, cmp_indexes) -> bool:
         """Whether or not we need to reindex variables for a set of
         matching indexes.
@@ -335,7 +347,7 @@ class Aligner(Generic[T_Alignable]):
           pandas). This is useful, e.g., for overwriting such duplicate indexes.
 
         """
-        if not indexes_all_equal(cmp_indexes):
+        if not indexes_all_equal(cmp_indexes, self.exclude_dims):
             # always reindex when matching indexes are not equal
             return True
 
@@ -383,11 +395,33 @@ class Aligner(Generic[T_Alignable]):
     def align_indexes(self) -> None:
         """Compute all aligned indexes and their corresponding coordinate variables."""
 
-        aligned_indexes = {}
-        aligned_index_vars = {}
-        reindex = {}
-        new_indexes = {}
-        new_index_vars = {}
+        aligned_indexes: dict[MatchingIndexKey, Index] = {}
+        aligned_index_vars: dict[MatchingIndexKey, dict[Hashable, Variable]] = {}
+        reindex: dict[MatchingIndexKey, bool] = {}
+        new_indexes: dict[Hashable, Index] = {}
+        new_index_vars: dict[Hashable, Variable] = {}
+
+        def update_dicts(
+            key: MatchingIndexKey,
+            idx: Index,
+            idx_vars: dict[Hashable, Variable],
+            need_reindex: bool,
+        ):
+            reindex[key] = need_reindex
+            aligned_indexes[key] = idx
+            aligned_index_vars[key] = idx_vars
+
+            for name, var in idx_vars.items():
+                if name in new_indexes:
+                    other_idx = new_indexes[name]
+                    other_var = new_index_vars[name]
+                    raise AlignmentError(
+                        f"cannot align objects on coordinate {name!r} because of conflicting indexes\n"
+                        f"first index: {idx!r}\nsecond index: {other_idx!r}\n"
+                        f"first variable: {var!r}\nsecond variable: {other_var!r}\n"
+                    )
+                new_indexes[name] = idx
+                new_index_vars[name] = var
 
         for key, matching_indexes in self.all_indexes.items():
             matching_index_vars = self.all_index_vars[key]
@@ -418,12 +452,34 @@ class Aligner(Generic[T_Alignable]):
                 else:
                     need_reindex = False
                 if need_reindex:
+                    if (
+                        isinstance(self.join, CombineKwargDefault)
+                        and self.join != "exact"
+                    ):
+                        emit_user_level_warning(
+                            self.join.warning_message(
+                                "This change will result in the following ValueError: "
+                                "cannot be aligned with join='exact' because "
+                                "index/labels/sizes are not equal along "
+                                "these coordinates (dimensions): "
+                                + ", ".join(
+                                    f"{name!r} {dims!r}" for name, dims in key[0]
+                                ),
+                                recommend_set_options=False,
+                            ),
+                            FutureWarning,
+                        )
                     if self.join == "exact":
-                        raise ValueError(
+                        raise AlignmentError(
                             "cannot align objects with join='exact' where "
                             "index/labels/sizes are not equal along "
                             "these coordinates (dimensions): "
                             + ", ".join(f"{name!r} {dims!r}" for name, dims in key[0])
+                            + (
+                                self.join.error_message()
+                                if isinstance(self.join, CombineKwargDefault)
+                                else ""
+                            )
                         )
                     joiner = self._get_index_joiner(index_cls)
                     joined_index = joiner(matching_indexes)
@@ -437,25 +493,14 @@ class Aligner(Generic[T_Alignable]):
                     joined_index = matching_indexes[0]
                     joined_index_vars = matching_index_vars[0]
 
-            reindex[key] = need_reindex
-            aligned_indexes[key] = joined_index
-            aligned_index_vars[key] = joined_index_vars
-
-            for name, var in joined_index_vars.items():
-                new_indexes[name] = joined_index
-                new_index_vars[name] = var
+            update_dicts(key, joined_index, joined_index_vars, need_reindex)
 
         # Explicitly provided indexes that are not found in objects to align
         # may relate to unindexed dimensions so we add them too
         for key, idx in self.indexes.items():
             if key not in aligned_indexes:
                 index_vars = self.index_vars[key]
-                reindex[key] = False
-                aligned_indexes[key] = idx
-                aligned_index_vars[key] = index_vars
-                for name, var in index_vars.items():
-                    new_indexes[name] = idx
-                    new_index_vars[name] = var
+                update_dicts(key, idx, index_vars, False)
 
         self.aligned_indexes = aligned_indexes
         self.aligned_index_vars = aligned_index_vars
@@ -474,7 +519,7 @@ class Aligner(Generic[T_Alignable]):
             else:
                 add_err_msg = ""
             if len(sizes) > 1:
-                raise ValueError(
+                raise AlignmentError(
                     f"cannot reindex or align along dimension {dim!r} "
                     f"because of conflicting dimension sizes: {sizes!r}" + add_err_msg
                 )
@@ -502,14 +547,31 @@ class Aligner(Generic[T_Alignable]):
         self,
         matching_indexes: dict[MatchingIndexKey, Index],
     ) -> dict[Hashable, Any]:
-        dim_pos_indexers = {}
+        dim_pos_indexers: dict[Hashable, Any] = {}
+        dim_index: dict[Hashable, Index] = {}
 
         for key, aligned_idx in self.aligned_indexes.items():
             obj_idx = matching_indexes.get(key)
-            if obj_idx is not None:
-                if self.reindex[key]:
-                    indexers = obj_idx.reindex_like(aligned_idx, **self.reindex_kwargs)
-                    dim_pos_indexers.update(indexers)
+            if obj_idx is not None and self.reindex[key]:
+                indexers = obj_idx.reindex_like(aligned_idx, **self.reindex_kwargs)
+                for dim, idxer in indexers.items():
+                    if dim in self.exclude_dims:
+                        raise AlignmentError(
+                            f"cannot reindex or align along dimension {dim!r} because "
+                            "it is explicitly excluded from alignment. This is likely caused by "
+                            "wrong results returned by the `reindex_like` method of this index:\n"
+                            f"{obj_idx!r}"
+                        )
+                    if dim in dim_pos_indexers and not np.array_equal(
+                        idxer, dim_pos_indexers[dim]
+                    ):
+                        raise AlignmentError(
+                            f"cannot reindex or align along dimension {dim!r} because "
+                            "of conflicting re-indexers returned by multiple indexes\n"
+                            f"first index: {obj_idx!r}\nsecond index: {dim_index[dim]!r}\n"
+                        )
+                    dim_pos_indexers[dim] = idxer
+                    dim_index[dim] = obj_idx
 
         return dim_pos_indexers
 
@@ -517,22 +579,37 @@ class Aligner(Generic[T_Alignable]):
         self,
         obj: T_Alignable,
         matching_indexes: dict[MatchingIndexKey, Index],
+        matching_index_vars: dict[MatchingIndexKey, dict[Hashable, Variable]],
     ) -> tuple[dict[Hashable, Index], dict[Hashable, Variable]]:
         new_indexes = {}
         new_variables = {}
 
         for key, aligned_idx in self.aligned_indexes.items():
-            index_vars = self.aligned_index_vars[key]
+            aligned_idx_vars = self.aligned_index_vars[key]
             obj_idx = matching_indexes.get(key)
+            obj_idx_vars = matching_index_vars.get(key)
+
             if obj_idx is None:
-                # add the index if it relates to unindexed dimensions in obj
-                index_vars_dims = {d for var in index_vars.values() for d in var.dims}
-                if index_vars_dims <= set(obj.dims):
+                # add the aligned index if it relates to unindexed dimensions in obj
+                dims = {d for var in aligned_idx_vars.values() for d in var.dims}
+                if dims <= set(obj.dims):
                     obj_idx = aligned_idx
+
             if obj_idx is not None:
-                for name, var in index_vars.items():
-                    new_indexes[name] = aligned_idx
-                    new_variables[name] = var.copy(deep=self.copy)
+                # TODO: always copy object's index when no re-indexing is required?
+                # (instead of assigning the aligned index)
+                # (need performance assessment)
+                if key in self.keep_original_indexes:
+                    assert self.reindex[key] is False
+                    new_idx = obj_idx.copy(deep=self.copy)
+                    new_idx_vars = new_idx.create_variables(obj_idx_vars)
+                else:
+                    new_idx = aligned_idx
+                    new_idx_vars = {
+                        k: v.copy(deep=self.copy) for k, v in aligned_idx_vars.items()
+                    }
+                new_indexes.update(dict.fromkeys(new_idx_vars, new_idx))
+                new_variables.update(new_idx_vars)
 
         return new_indexes, new_variables
 
@@ -540,8 +617,11 @@ class Aligner(Generic[T_Alignable]):
         self,
         obj: T_Alignable,
         matching_indexes: dict[MatchingIndexKey, Index],
+        matching_index_vars: dict[MatchingIndexKey, dict[Hashable, Variable]],
     ) -> T_Alignable:
-        new_indexes, new_variables = self._get_indexes_and_vars(obj, matching_indexes)
+        new_indexes, new_variables = self._get_indexes_and_vars(
+            obj, matching_indexes, matching_index_vars
+        )
         dim_pos_indexers = self._get_dim_pos_indexers(matching_indexes)
 
         return obj._reindex_callback(
@@ -556,9 +636,14 @@ class Aligner(Generic[T_Alignable]):
 
     def reindex_all(self) -> None:
         self.results = tuple(
-            self._reindex_one(obj, matching_indexes)
-            for obj, matching_indexes in zip(
-                self.objects, self.objects_matching_indexes, strict=True
+            starmap(
+                self._reindex_one,
+                zip(
+                    self.objects,
+                    self.objects_matching_indexes,
+                    self.objects_matching_index_vars,
+                    strict=True,
+                ),
             )
         )
 
@@ -571,7 +656,6 @@ class Aligner(Generic[T_Alignable]):
 
         self.find_matching_indexes()
         self.find_matching_unindexed_dims()
-        self.assert_no_index_conflict()
         self.align_indexes()
         self.assert_unindexed_dim_sizes_equal()
 
@@ -595,7 +679,7 @@ def align(
     obj1: T_Obj1,
     /,
     *,
-    join: JoinOptions = "inner",
+    join: JoinOptions | CombineKwargDefault = "inner",
     copy: bool = True,
     indexes=None,
     exclude: str | Iterable[Hashable] = frozenset(),
@@ -609,7 +693,7 @@ def align(
     obj2: T_Obj2,
     /,
     *,
-    join: JoinOptions = "inner",
+    join: JoinOptions | CombineKwargDefault = "inner",
     copy: bool = True,
     indexes=None,
     exclude: str | Iterable[Hashable] = frozenset(),
@@ -624,7 +708,7 @@ def align(
     obj3: T_Obj3,
     /,
     *,
-    join: JoinOptions = "inner",
+    join: JoinOptions | CombineKwargDefault = "inner",
     copy: bool = True,
     indexes=None,
     exclude: str | Iterable[Hashable] = frozenset(),
@@ -640,7 +724,7 @@ def align(
     obj4: T_Obj4,
     /,
     *,
-    join: JoinOptions = "inner",
+    join: JoinOptions | CombineKwargDefault = "inner",
     copy: bool = True,
     indexes=None,
     exclude: str | Iterable[Hashable] = frozenset(),
@@ -657,7 +741,7 @@ def align(
     obj5: T_Obj5,
     /,
     *,
-    join: JoinOptions = "inner",
+    join: JoinOptions | CombineKwargDefault = "inner",
     copy: bool = True,
     indexes=None,
     exclude: str | Iterable[Hashable] = frozenset(),
@@ -668,7 +752,7 @@ def align(
 @overload
 def align(
     *objects: T_Alignable,
-    join: JoinOptions = "inner",
+    join: JoinOptions | CombineKwargDefault = "inner",
     copy: bool = True,
     indexes=None,
     exclude: str | Iterable[Hashable] = frozenset(),
@@ -678,7 +762,7 @@ def align(
 
 def align(
     *objects: T_Alignable,
-    join: JoinOptions = "inner",
+    join: JoinOptions | CombineKwargDefault = "inner",
     copy: bool = True,
     indexes=None,
     exclude: str | Iterable[Hashable] = frozenset(),
@@ -735,7 +819,7 @@ def align(
 
     Raises
     ------
-    ValueError
+    AlignmentError
         If any dimensions without labels on the arguments have different sizes,
         or a different size than the size of the aligned dimension labels.
 
@@ -853,7 +937,7 @@ def align(
     >>> a, b = xr.align(x, y, join="exact")
     Traceback (most recent call last):
     ...
-    ValueError: cannot align objects with join='exact' ...
+    xarray.structure.alignment.AlignmentError: cannot align objects with join='exact' ...
 
     >>> a, b = xr.align(x, y, join="override")
     >>> a
@@ -886,7 +970,7 @@ def align(
 
 def deep_align(
     objects: Iterable[Any],
-    join: JoinOptions = "inner",
+    join: JoinOptions | CombineKwargDefault = "inner",
     copy: bool = True,
     indexes=None,
     exclude: str | Iterable[Hashable] = frozenset(),
@@ -1081,9 +1165,9 @@ def _broadcast_helper(
 
     # remove casts once https://github.com/python/mypy/issues/12800 is resolved
     if isinstance(arg, DataArray):
-        return cast(T_Alignable, _broadcast_array(arg))
+        return _broadcast_array(arg)  # type: ignore[return-value,unused-ignore]
     elif isinstance(arg, Dataset):
-        return cast(T_Alignable, _broadcast_dataset(arg))
+        return _broadcast_dataset(arg)  # type: ignore[return-value,unused-ignore]
     else:
         raise ValueError("all input must be Dataset or DataArray objects")
 
diff -pruN 2025.03.1-8/xarray/structure/chunks.py 2025.10.1-1/xarray/structure/chunks.py
--- 2025.03.1-8/xarray/structure/chunks.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/structure/chunks.py	2025-10-10 10:38:05.000000000 +0000
@@ -74,7 +74,7 @@ def _get_chunk(var: Variable, chunks, ch
     # Determine the explicit requested chunks.
     preferred_chunks = var.encoding.get("preferred_chunks", {})
     preferred_chunk_shape = tuple(
-        preferred_chunks.get(dim, size) for dim, size in zip(dims, shape, strict=True)
+        itertools.starmap(preferred_chunks.get, zip(dims, shape, strict=True))
     )
     if isinstance(chunks, Number) or (chunks == "auto"):
         chunks = dict.fromkeys(dims, chunks)
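
``starmap(preferred_chunks.get, zip(dims, shape, strict=True))`` calls ``preferred_chunks.get(dim, size)`` for each pair, falling back to the full dimension size when no preferred chunk is recorded, exactly like the generator expression it replaces. A quick standalone check with made-up names:

from itertools import starmap

preferred_chunks = {"x": 50}
dims, shape = ("x", "y"), (100, 30)

new = tuple(starmap(preferred_chunks.get, zip(dims, shape, strict=True)))
old = tuple(preferred_chunks.get(dim, size) for dim, size in zip(dims, shape, strict=True))
assert new == old == (50, 30)
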
@@ -138,7 +138,7 @@ def _maybe_chunk(
             # by providing chunks as an input to tokenize.
             # subtle bugs result otherwise. see GH3350
             # we use str() for speed, and use the name for the final array name on the next line
-            token2 = tokenize(token if token else var._data, str(chunks))
+            token2 = tokenize(token or var._data, str(chunks))
             name2 = f"{name_prefix}{name}-{token2}"
 
             from_array_kwargs = utils.consolidate_dask_from_array_kwargs(
@@ -167,15 +167,15 @@ _V = TypeVar("_V", bound=Union["Dataset"
 
 
 @overload
-def unify_chunks(__obj: _T) -> tuple[_T]: ...
+def unify_chunks(obj: _T, /) -> tuple[_T]: ...
 
 
 @overload
-def unify_chunks(__obj1: _T, __obj2: _U) -> tuple[_T, _U]: ...
+def unify_chunks(obj1: _T, obj2: _U, /) -> tuple[_T, _U]: ...
 
 
 @overload
-def unify_chunks(__obj1: _T, __obj2: _U, __obj3: _V) -> tuple[_T, _U, _V]: ...
+def unify_chunks(obj1: _T, obj2: _U, obj3: _V, /) -> tuple[_T, _U, _V]: ...
 
 
 @overload
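
The ``__obj``-style dunder names were the older typing convention for marking parameters positional-only; the overloads now use the explicit ``/`` marker, with identical call semantics. Illustrative only:

def unify(obj, /):
    return obj

unify(1)        # fine: positional call
# unify(obj=1)  # raises TypeError because 'obj' is positional-only
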
diff -pruN 2025.03.1-8/xarray/structure/combine.py 2025.10.1-1/xarray/structure/combine.py
--- 2025.03.1-8/xarray/structure/combine.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/structure/combine.py	2025-10-10 10:38:05.000000000 +0000
@@ -10,8 +10,16 @@ from xarray.core import dtypes
 from xarray.core.dataarray import DataArray
 from xarray.core.dataset import Dataset
 from xarray.core.utils import iterate_nested
+from xarray.structure.alignment import AlignmentError
 from xarray.structure.concat import concat
 from xarray.structure.merge import merge
+from xarray.util.deprecation_helpers import (
+    _COMPAT_DEFAULT,
+    _COORDS_DEFAULT,
+    _DATA_VARS_DEFAULT,
+    _JOIN_DEFAULT,
+    CombineKwargDefault,
+)
 
 if TYPE_CHECKING:
     from xarray.core.types import (
@@ -200,12 +208,12 @@ def _check_shape_tile_ids(combined_tile_
 def _combine_nd(
     combined_ids,
     concat_dims,
-    data_vars="all",
-    coords="different",
-    compat: CompatOptions = "no_conflicts",
-    fill_value=dtypes.NA,
-    join: JoinOptions = "outer",
-    combine_attrs: CombineAttrsOptions = "drop",
+    data_vars,
+    coords,
+    compat: CompatOptions | CombineKwargDefault,
+    fill_value,
+    join: JoinOptions | CombineKwargDefault,
+    combine_attrs: CombineAttrsOptions,
 ):
     """
     Combines an N-dimensional structure of datasets into one by applying a
@@ -262,10 +270,10 @@ def _combine_all_along_first_dim(
     dim,
     data_vars,
     coords,
-    compat: CompatOptions,
-    fill_value=dtypes.NA,
-    join: JoinOptions = "outer",
-    combine_attrs: CombineAttrsOptions = "drop",
+    compat: CompatOptions | CombineKwargDefault,
+    fill_value,
+    join: JoinOptions | CombineKwargDefault,
+    combine_attrs: CombineAttrsOptions,
 ):
     # Group into lines of datasets which must be combined along dim
     grouped = groupby_defaultdict(list(combined_ids.items()), key=_new_tile_id)
@@ -276,7 +284,14 @@ def _combine_all_along_first_dim(
         combined_ids = dict(sorted(group))
         datasets = combined_ids.values()
         new_combined_ids[new_id] = _combine_1d(
-            datasets, dim, compat, data_vars, coords, fill_value, join, combine_attrs
+            datasets,
+            concat_dim=dim,
+            compat=compat,
+            data_vars=data_vars,
+            coords=coords,
+            fill_value=fill_value,
+            join=join,
+            combine_attrs=combine_attrs,
         )
     return new_combined_ids
 
@@ -284,12 +299,12 @@ def _combine_all_along_first_dim(
 def _combine_1d(
     datasets,
     concat_dim,
-    compat: CompatOptions = "no_conflicts",
-    data_vars="all",
-    coords="different",
-    fill_value=dtypes.NA,
-    join: JoinOptions = "outer",
-    combine_attrs: CombineAttrsOptions = "drop",
+    compat: CompatOptions | CombineKwargDefault,
+    data_vars,
+    coords,
+    fill_value,
+    join: JoinOptions | CombineKwargDefault,
+    combine_attrs: CombineAttrsOptions,
 ):
     """
     Applies either concat or merge to 1D list of datasets depending on value
@@ -320,19 +335,26 @@ def _combine_1d(
             else:
                 raise
     else:
-        combined = merge(
-            datasets,
-            compat=compat,
-            fill_value=fill_value,
-            join=join,
-            combine_attrs=combine_attrs,
-        )
+        try:
+            combined = merge(
+                datasets,
+                compat=compat,
+                fill_value=fill_value,
+                join=join,
+                combine_attrs=combine_attrs,
+            )
+        except AlignmentError as e:
+            e.add_note(
+                "If you are intending to concatenate datasets, please specify the concatenation dimension explicitly. "
+                "Using merge to concatenate is quite inefficient."
+            )
+            raise e
 
     return combined
 
 
 def _new_tile_id(single_id_ds_pair):
-    tile_id, ds = single_id_ds_pair
+    tile_id, _ds = single_id_ds_pair
     return tile_id[1:]
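The new `except AlignmentError` branch above uses `BaseException.add_note` (Python 3.11+) to attach a hint without changing the exception type. A tiny standalone sketch of that mechanism:

```python
def demo():
    try:
        raise ValueError("cannot align objects with join='exact'")
    except ValueError as e:
        e.add_note("Hint: pass an explicit concatenation dimension instead.")
        raise

try:
    demo()
except ValueError as e:
    print(e)            # original message
    print(e.__notes__)  # ['Hint: pass an explicit concatenation dimension instead.']
```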
 
 
@@ -343,9 +365,9 @@ def _nested_combine(
     data_vars,
     coords,
     ids,
-    fill_value=dtypes.NA,
-    join: JoinOptions = "outer",
-    combine_attrs: CombineAttrsOptions = "drop",
+    fill_value,
+    join: JoinOptions | CombineKwargDefault,
+    combine_attrs: CombineAttrsOptions,
 ):
     if len(datasets) == 0:
         return Dataset()
@@ -366,7 +388,7 @@ def _nested_combine(
     # Apply series of concatenate or merge operations along each dimension
     combined = _combine_nd(
         combined_ids,
-        concat_dims,
+        concat_dims=concat_dims,
         compat=compat,
         data_vars=data_vars,
         coords=coords,
@@ -383,12 +405,12 @@ DATASET_HYPERCUBE = Union[Dataset, Itera
 
 def combine_nested(
     datasets: DATASET_HYPERCUBE,
-    concat_dim: str | DataArray | None | Sequence[str | DataArray | pd.Index | None],
-    compat: str = "no_conflicts",
-    data_vars: str = "all",
-    coords: str = "different",
+    concat_dim: str | DataArray | Sequence[str | DataArray | pd.Index | None] | None,
+    compat: str | CombineKwargDefault = _COMPAT_DEFAULT,
+    data_vars: str | CombineKwargDefault = _DATA_VARS_DEFAULT,
+    coords: str | CombineKwargDefault = _COORDS_DEFAULT,
     fill_value: object = dtypes.NA,
-    join: JoinOptions = "outer",
+    join: JoinOptions | CombineKwargDefault = _JOIN_DEFAULT,
     combine_attrs: CombineAttrsOptions = "drop",
 ) -> Dataset:
     """
@@ -439,9 +461,36 @@ def combine_nested(
           of all non-null values.
         - "override": skip comparing and pick variable from first dataset
     data_vars : {"minimal", "different", "all" or list of str}, optional
-        Details are in the documentation of concat
+        These data variables will be concatenated together:
+          * "minimal": Only data variables in which the dimension already
+            appears are included.
+          * "different": Data variables which are not equal (ignoring
+            attributes) across all datasets are also concatenated (as well as
+            all for which dimension already appears). Beware: this option may
+            load the data payload of data variables into memory if they are not
+            already loaded.
+          * "all": All data variables will be concatenated.
+          * None: Means ``"all"`` if ``dim`` is not present in any of the ``objs``,
+            and ``"minimal"`` if ``dim`` is present in any of ``objs``.
+          * list of dims: The listed data variables will be concatenated, in
+            addition to the "minimal" data variables.
+
     coords : {"minimal", "different", "all" or list of str}, optional
-        Details are in the documentation of concat
+        These coordinate variables will be concatenated together:
+          * "minimal": Only coordinates in which the dimension already appears
+            are included. If concatenating over a dimension _not_
+            present in any of the objects, then all data variables will
+            be concatenated along that new dimension.
+          * "different": Coordinates which are not equal (ignoring attributes)
+            across all datasets are also concatenated (as well as all for which
+            dimension already appears). Beware: this option may load the data
+            payload of coordinate variables into memory if they are not already
+            loaded.
+          * "all": All coordinate variables will be concatenated, except
+            those corresponding to other dimensions.
+          * list of Hashable: The listed coordinate variables will be concatenated,
+            in addition to the "minimal" coordinates.
+
     fill_value : scalar or dict-like, optional
         Value to use for newly missing values. If a dict-like, maps
         variable names to fill values. Use a data array's name to
@@ -619,12 +668,12 @@ def groupby_defaultdict(
 
 def _combine_single_variable_hypercube(
     datasets,
-    fill_value=dtypes.NA,
-    data_vars="all",
-    coords="different",
-    compat: CompatOptions = "no_conflicts",
-    join: JoinOptions = "outer",
-    combine_attrs: CombineAttrsOptions = "no_conflicts",
+    fill_value,
+    data_vars,
+    coords,
+    compat: CompatOptions | CombineKwargDefault,
+    join: JoinOptions | CombineKwargDefault,
+    combine_attrs: CombineAttrsOptions,
 ):
     """
     Attempt to combine a list of Datasets into a hypercube using their
@@ -678,11 +727,14 @@ def _combine_single_variable_hypercube(
 
 def combine_by_coords(
     data_objects: Iterable[Dataset | DataArray] = [],
-    compat: CompatOptions = "no_conflicts",
-    data_vars: Literal["all", "minimal", "different"] | list[str] = "all",
-    coords: str = "different",
+    compat: CompatOptions | CombineKwargDefault = _COMPAT_DEFAULT,
+    data_vars: Literal["all", "minimal", "different"]
+    | None
+    | list[str]
+    | CombineKwargDefault = _DATA_VARS_DEFAULT,
+    coords: str | CombineKwargDefault = _COORDS_DEFAULT,
     fill_value: object = dtypes.NA,
-    join: JoinOptions = "outer",
+    join: JoinOptions | CombineKwargDefault = _JOIN_DEFAULT,
     combine_attrs: CombineAttrsOptions = "no_conflicts",
 ) -> Dataset | DataArray:
     """
@@ -862,7 +914,7 @@ def combine_by_coords(
         temperature    (y, x) float64 96B 10.98 14.3 12.06 ... 1.743 0.4044 16.65
         precipitation  (y, x) float64 96B 0.4376 0.8918 0.9637 ... 0.4615 0.7805
 
-    >>> xr.combine_by_coords([x3, x1])
+    >>> xr.combine_by_coords([x3, x1], join="outer")
     <xarray.Dataset> Size: 464B
     Dimensions:        (y: 4, x: 6)
     Coordinates:
@@ -882,7 +934,7 @@ def combine_by_coords(
         temperature    (y, x) float64 96B 10.98 14.3 12.06 ... 18.89 10.44 8.293
         precipitation  (y, x) float64 96B 0.4376 0.8918 0.9637 ... 0.01879 0.6176
 
-    >>> xr.combine_by_coords([x1, x2, x3])
+    >>> xr.combine_by_coords([x1, x2, x3], join="outer")
     <xarray.Dataset> Size: 464B
     Dimensions:        (y: 4, x: 6)
     Coordinates:
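Following the expanded docstrings and updated doctests above, a short sketch (with synthetic data) of calling `combine_nested` with every combine keyword spelled out instead of relying on the transitioning defaults:

```python
import numpy as np
import xarray as xr

ds0 = xr.Dataset({"t": ("x", np.arange(3.0))}, coords={"x": [0, 1, 2]})
ds1 = xr.Dataset({"t": ("x", np.arange(3.0, 6.0))}, coords={"x": [3, 4, 5]})

# Being explicit about data_vars/coords/compat/join keeps the current
# behaviour regardless of which default is in effect.
combined = xr.combine_nested(
    [ds0, ds1],
    concat_dim="x",
    data_vars="all",
    coords="minimal",
    compat="no_conflicts",
    join="outer",
)
print(combined.sizes["x"])  # 6
```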
diff -pruN 2025.03.1-8/xarray/structure/concat.py 2025.10.1-1/xarray/structure/concat.py
--- 2025.03.1-8/xarray/structure/concat.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/structure/concat.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 from collections.abc import Hashable, Iterable
-from typing import TYPE_CHECKING, Any, Union, overload
+from typing import TYPE_CHECKING, Any, Literal, Union, overload
 
 import numpy as np
 import pandas as pd
@@ -11,6 +11,7 @@ from xarray.core.coordinates import Coor
 from xarray.core.duck_array_ops import lazy_array_equiv
 from xarray.core.indexes import Index, PandasIndex
 from xarray.core.types import T_DataArray, T_Dataset, T_Variable
+from xarray.core.utils import emit_user_level_warning
 from xarray.core.variable import Variable
 from xarray.core.variable import concat as concat_vars
 from xarray.structure.alignment import align, reindex_variables
@@ -20,6 +21,13 @@ from xarray.structure.merge import (
     merge_attrs,
     merge_collected,
 )
+from xarray.util.deprecation_helpers import (
+    _COMPAT_CONCAT_DEFAULT,
+    _COORDS_DEFAULT,
+    _DATA_VARS_DEFAULT,
+    _JOIN_DEFAULT,
+    CombineKwargDefault,
+)
 
 if TYPE_CHECKING:
     from xarray.core.types import (
@@ -29,7 +37,7 @@ if TYPE_CHECKING:
         JoinOptions,
     )
 
-    T_DataVars = Union[ConcatOptions, Iterable[Hashable]]
+    T_DataVars = Union[ConcatOptions, Iterable[Hashable], None]
 
 
 # TODO: replace dim: Any by 1D array_likes
@@ -37,12 +45,12 @@ if TYPE_CHECKING:
 def concat(
     objs: Iterable[T_Dataset],
     dim: Hashable | T_Variable | T_DataArray | pd.Index | Any,
-    data_vars: T_DataVars = "all",
-    coords: ConcatOptions | list[Hashable] = "different",
-    compat: CompatOptions = "equals",
+    data_vars: T_DataVars | CombineKwargDefault = _DATA_VARS_DEFAULT,
+    coords: ConcatOptions | Iterable[Hashable] | CombineKwargDefault = _COORDS_DEFAULT,
+    compat: CompatOptions | CombineKwargDefault = _COMPAT_CONCAT_DEFAULT,
     positions: Iterable[Iterable[int]] | None = None,
     fill_value: object = dtypes.NA,
-    join: JoinOptions = "outer",
+    join: JoinOptions | CombineKwargDefault = _JOIN_DEFAULT,
     combine_attrs: CombineAttrsOptions = "override",
     create_index_for_new_dim: bool = True,
 ) -> T_Dataset: ...
@@ -52,12 +60,12 @@ def concat(
 def concat(
     objs: Iterable[T_DataArray],
     dim: Hashable | T_Variable | T_DataArray | pd.Index | Any,
-    data_vars: T_DataVars = "all",
-    coords: ConcatOptions | list[Hashable] = "different",
-    compat: CompatOptions = "equals",
+    data_vars: T_DataVars | CombineKwargDefault = _DATA_VARS_DEFAULT,
+    coords: ConcatOptions | Iterable[Hashable] | CombineKwargDefault = _COORDS_DEFAULT,
+    compat: CompatOptions | CombineKwargDefault = _COMPAT_CONCAT_DEFAULT,
     positions: Iterable[Iterable[int]] | None = None,
     fill_value: object = dtypes.NA,
-    join: JoinOptions = "outer",
+    join: JoinOptions | CombineKwargDefault = _JOIN_DEFAULT,
     combine_attrs: CombineAttrsOptions = "override",
     create_index_for_new_dim: bool = True,
 ) -> T_DataArray: ...
@@ -66,12 +74,12 @@ def concat(
 def concat(
     objs,
     dim,
-    data_vars: T_DataVars = "all",
-    coords="different",
-    compat: CompatOptions = "equals",
+    data_vars: T_DataVars | CombineKwargDefault = _DATA_VARS_DEFAULT,
+    coords: ConcatOptions | Iterable[Hashable] | CombineKwargDefault = _COORDS_DEFAULT,
+    compat: CompatOptions | CombineKwargDefault = _COMPAT_CONCAT_DEFAULT,
     positions=None,
     fill_value=dtypes.NA,
-    join: JoinOptions = "outer",
+    join: JoinOptions | CombineKwargDefault = _JOIN_DEFAULT,
     combine_attrs: CombineAttrsOptions = "override",
     create_index_for_new_dim: bool = True,
 ):
@@ -90,7 +98,7 @@ def concat(
         unchanged. If dimension is provided as a Variable, DataArray or Index, its name
         is used as the dimension to concatenate along and the values are added
         as a coordinate.
-    data_vars : {"minimal", "different", "all"} or list of Hashable, optional
+    data_vars : {"minimal", "different", "all", None} or list of Hashable, optional
         These data variables will be concatenated together:
           * "minimal": Only data variables in which the dimension already
             appears are included.
@@ -100,6 +108,8 @@ def concat(
             load the data payload of data variables into memory if they are not
             already loaded.
           * "all": All data variables will be concatenated.
+          * None: Means ``"all"`` if ``dim`` is not present in any of the ``objs``,
+            and ``"minimal"`` if ``dim`` is present in any of ``objs``.
           * list of dims: The listed data variables will be concatenated, in
             addition to the "minimal" data variables.
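A short sketch of the newly documented `None` option described in the list above: over an existing dimension it behaves like `"minimal"`, over a brand-new one like `"all"` (throwaway dataset for illustration).

```python
import numpy as np
import xarray as xr

ds = xr.Dataset({"v": ("x", np.arange(4.0))}, coords={"x": np.arange(4)})

# "x" already exists in the inputs -> behaves like data_vars="minimal"
along_x = xr.concat(
    [ds.isel(x=slice(0, 2)), ds.isel(x=slice(2, 4))], dim="x", data_vars=None
)

# "run" is a new dimension -> behaves like data_vars="all"
along_run = xr.concat([ds, ds], dim="run", data_vars=None)

print(along_x.sizes["x"], along_run.sizes["run"])  # 4 2
```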
 
@@ -107,7 +117,9 @@ def concat(
     coords : {"minimal", "different", "all"} or list of Hashable, optional
         These coordinate variables will be concatenated together:
           * "minimal": Only coordinates in which the dimension already appears
-            are included.
+            are included. If concatenating over a dimension _not_
+            present in any of the objects, then all data variables will
+            be concatenated along that new dimension.
           * "different": Coordinates which are not equal (ignoring attributes)
             across all datasets are also concatenated (as well as all for which
             dimension already appears). Beware: this option may load the data
@@ -199,7 +211,7 @@ def concat(
       * x        (x) <U1 8B 'a' 'b'
       * y        (y) int64 24B 10 20 30
 
-    >>> xr.concat([da.isel(x=0), da.isel(x=1)], "x")
+    >>> xr.concat([da.isel(x=0), da.isel(x=1)], "x", coords="minimal")
     <xarray.DataArray (x: 2, y: 3)> Size: 48B
     array([[0, 1, 2],
            [3, 4, 5]])
@@ -207,23 +219,27 @@ def concat(
       * x        (x) <U1 8B 'a' 'b'
       * y        (y) int64 24B 10 20 30
 
-    >>> xr.concat([da.isel(x=0), da.isel(x=1)], "new_dim")
+    >>> xr.concat([da.isel(x=0), da.isel(x=1)], "new_dim", coords="all")
     <xarray.DataArray (new_dim: 2, y: 3)> Size: 48B
     array([[0, 1, 2],
            [3, 4, 5]])
     Coordinates:
-        x        (new_dim) <U1 8B 'a' 'b'
       * y        (y) int64 24B 10 20 30
+        x        (new_dim) <U1 8B 'a' 'b'
     Dimensions without coordinates: new_dim
 
-    >>> xr.concat([da.isel(x=0), da.isel(x=1)], pd.Index([-90, -100], name="new_dim"))
+    >>> xr.concat(
+    ...     [da.isel(x=0), da.isel(x=1)],
+    ...     pd.Index([-90, -100], name="new_dim"),
+    ...     coords="all",
+    ... )
     <xarray.DataArray (new_dim: 2, y: 3)> Size: 48B
     array([[0, 1, 2],
            [3, 4, 5]])
     Coordinates:
-        x        (new_dim) <U1 8B 'a' 'b'
-      * y        (y) int64 24B 10 20 30
       * new_dim  (new_dim) int64 16B -90 -100
+      * y        (y) int64 24B 10 20 30
+        x        (new_dim) <U1 8B 'a' 'b'
 
     # Concatenate a scalar variable along a new dimension of the same name with and without creating a new index
 
@@ -255,7 +271,9 @@ def concat(
     except StopIteration as err:
         raise ValueError("must supply at least one object to concatenate") from err
 
-    if compat not in set(_VALID_COMPAT) - {"minimal"}:
+    if not isinstance(compat, CombineKwargDefault) and compat not in set(
+        _VALID_COMPAT
+    ) - {"minimal"}:
         raise ValueError(
             f"compat={compat!r} invalid: must be 'broadcast_equals', 'equals', 'identical', 'no_conflicts' or 'override'"
         )
@@ -320,40 +338,89 @@ def _calc_concat_dim_index(
     return dim, index
 
 
-def _calc_concat_over(datasets, dim, dim_names, data_vars: T_DataVars, coords, compat):
+def _calc_concat_over(
+    datasets: list[T_Dataset],
+    dim: Hashable,
+    all_dims: set[Hashable],
+    data_vars: T_DataVars | CombineKwargDefault,
+    coords: ConcatOptions | Iterable[Hashable] | CombineKwargDefault,
+    compat: CompatOptions | CombineKwargDefault,
+) -> tuple[set[Hashable], dict[Hashable, bool], list[int], set[Hashable]]:
     """
     Determine which dataset variables need to be concatenated in the result,
     """
-    # Return values
+    # variables to be concatenated
     concat_over = set()
-    equals = {}
+    # variables checked for equality
+    equals: dict[Hashable, bool] = {}
+    # skip merging these variables.
+    #   if concatenating over a dimension 'x' that is associated with an index over 2 variables,
+    #   'x' and 'y', then we assert join="equals" on `y` and don't need to merge it.
+    #   that assertion happens in the align step prior to this function being called
+    skip_merge: set[Hashable] = set()
 
-    if dim in dim_names:
+    if dim in all_dims:
         concat_over_existing_dim = True
         concat_over.add(dim)
     else:
         concat_over_existing_dim = False
 
+    if data_vars == "minimal" and coords == "minimal" and not concat_over_existing_dim:
+        raise ValueError(
+            "Cannot specify both data_vars='minimal' and coords='minimal' when "
+            "concatenating over a new dimension."
+        )
+
+    if data_vars is None or (
+        isinstance(data_vars, CombineKwargDefault) and data_vars._value is None
+    ):
+        data_vars = "minimal" if concat_over_existing_dim else "all"
+
     concat_dim_lengths = []
     for ds in datasets:
-        if concat_over_existing_dim:
-            if dim not in ds.dims:
-                if dim in ds:
-                    ds = ds.set_coords(dim)
+        if concat_over_existing_dim and dim not in ds.dims and dim in ds:
+            ds = ds.set_coords(dim)
         concat_over.update(k for k, v in ds.variables.items() if dim in v.dims)
+        for _, idx_vars in ds.xindexes.group_by_index():
+            if any(dim in v.dims for v in idx_vars.values()):
+                skip_merge.update(idx_vars.keys())
         concat_dim_lengths.append(ds.sizes.get(dim, 1))
 
-    def process_subset_opt(opt, subset):
-        if isinstance(opt, str):
+    def process_subset_opt(
+        opt: ConcatOptions | Iterable[Hashable] | CombineKwargDefault,
+        subset: Literal["coords", "data_vars"],
+    ) -> None:
+        original = set(concat_over)
+        compat_str = (
+            compat._value if isinstance(compat, CombineKwargDefault) else compat
+        )
+        assert compat_str is not None
+        if isinstance(opt, str | CombineKwargDefault):
             if opt == "different":
+                if isinstance(compat, CombineKwargDefault) and compat != "override":
+                    if not isinstance(opt, CombineKwargDefault):
+                        emit_user_level_warning(
+                            compat.warning_message(
+                                "This change will result in the following ValueError: "
+                                f"Cannot specify both {subset}='different' and compat='override'.",
+                                recommend_set_options=False,
+                            ),
+                            FutureWarning,
+                        )
+
                 if compat == "override":
                     raise ValueError(
                         f"Cannot specify both {subset}='different' and compat='override'."
+                        + (
+                            compat.error_message()
+                            if isinstance(compat, CombineKwargDefault)
+                            else ""
+                        )
                     )
                 # all nonindexes that are not the same in each dataset
                 for k in getattr(datasets[0], subset):
                     if k not in concat_over:
-                        equals[k] = None
+                        equal = None
 
                         variables = [
                             ds.variables[k] for ds in datasets if k in ds.variables
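A small sketch of the new guard added near the top of this hunk: requesting `data_vars="minimal"` together with `coords="minimal"` while concatenating over a dimension that none of the inputs has now fails fast instead of silently concatenating nothing.

```python
import xarray as xr

ds = xr.Dataset({"v": ("x", [1.0, 2.0])}, coords={"x": [0, 1]})

try:
    xr.concat([ds, ds], dim="t", data_vars="minimal", coords="minimal")
except ValueError as err:
    print(err)  # Cannot specify both data_vars='minimal' and coords='minimal' ...
```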
@@ -372,19 +439,19 @@ def _calc_concat_over(datasets, dim, dim
 
                         # first check without comparing values i.e. no computes
                         for var in variables[1:]:
-                            equals[k] = getattr(variables[0], compat)(
+                            equal = getattr(variables[0], compat_str)(
                                 var, equiv=lazy_array_equiv
                             )
-                            if equals[k] is not True:
+                            if equal is not True:
                                 # exit early if we know these are not equal or that
                                 # equality cannot be determined i.e. one or all of
                                 # the variables wraps a numpy array
                                 break
 
-                        if equals[k] is False:
+                        if equal is False:
                             concat_over.add(k)
 
-                        elif equals[k] is None:
+                        elif equal is None:
                             # Compare the variable of all datasets vs. the one
                             # of the first dataset. Perform the minimum amount of
                             # loads in order to avoid multiple loads from disk
@@ -395,7 +462,7 @@ def _calc_concat_over(datasets, dim, dim
                             for ds_rhs in datasets[1:]:
                                 v_rhs = ds_rhs.variables[k].compute()
                                 computed.append(v_rhs)
-                                if not getattr(v_lhs, compat)(v_rhs):
+                                if not getattr(v_lhs, compat_str)(v_rhs):
                                     concat_over.add(k)
                                     equals[k] = False
                                     # computed variables are not to be re-computed
@@ -406,18 +473,34 @@ def _calc_concat_over(datasets, dim, dim
                                         ds.variables[k].data = v.data
                                     break
                             else:
-                                equals[k] = True
+                                equal = True
+                        if TYPE_CHECKING:
+                            assert equal is not None
+                        equals[k] = equal
 
             elif opt == "all":
                 concat_over.update(
                     set().union(
-                        *list(set(getattr(d, subset)) - set(d.dims) for d in datasets)
+                        *[set(getattr(d, subset)) - set(d.dims) for d in datasets]
                     )
                 )
             elif opt == "minimal":
                 pass
             else:
                 raise ValueError(f"unexpected value for {subset}: {opt}")
+
+            if (
+                isinstance(opt, CombineKwargDefault)
+                and opt._value is not None
+                and original != concat_over
+                and concat_over_existing_dim
+            ):
+                warnings.append(
+                    opt.warning_message(
+                        "This is likely to lead to different results when multiple datasets "
+                        "have matching variables with overlapping values.",
+                    )
+                )
         else:
             valid_vars = tuple(getattr(datasets[0], subset))
             invalid_vars = [k for k in opt if k not in valid_vars]
@@ -436,15 +519,21 @@ def _calc_concat_over(datasets, dim, dim
                     )
             concat_over.update(opt)
 
+    warnings: list[str] = []
     process_subset_opt(data_vars, "data_vars")
     process_subset_opt(coords, "coords")
-    return concat_over, equals, concat_dim_lengths
+
+    for warning in warnings:
+        emit_user_level_warning(warning, FutureWarning)
+
+    return concat_over, equals, concat_dim_lengths, skip_merge
 
 
 # determine dimensional coordinate names and a dict mapping name to DataArray
 def _parse_datasets(
     datasets: list[T_Dataset],
 ) -> tuple[
+    set[Hashable],
     dict[Hashable, Variable],
     dict[Hashable, int],
     set[Hashable],
@@ -473,20 +562,27 @@ def _parse_datasets(
                 dim_coords[dim] = ds.coords[dim].variable
         dims = dims | set(ds.dims)
 
-    return dim_coords, dims_sizes, all_coord_names, data_vars, list(variables_order)
+    return (
+        dims,
+        dim_coords,
+        dims_sizes,
+        all_coord_names,
+        data_vars,
+        list(variables_order),
+    )
 
 
 def _dataset_concat(
     datasets: Iterable[T_Dataset],
     dim: str | T_Variable | T_DataArray | pd.Index,
-    data_vars: T_DataVars,
-    coords: str | list[str],
-    compat: CompatOptions,
+    data_vars: T_DataVars | CombineKwargDefault,
+    coords: ConcatOptions | Iterable[Hashable] | CombineKwargDefault,
+    compat: CompatOptions | CombineKwargDefault,
     positions: Iterable[Iterable[int]] | None,
-    fill_value: Any = dtypes.NA,
-    join: JoinOptions = "outer",
-    combine_attrs: CombineAttrsOptions = "override",
-    create_index_for_new_dim: bool = True,
+    fill_value: Any,
+    join: JoinOptions | CombineKwargDefault,
+    combine_attrs: CombineAttrsOptions,
+    create_index_for_new_dim: bool,
 ) -> T_Dataset:
     """
     Concatenate a sequence of datasets along a new or existing dimension
@@ -501,6 +597,7 @@ def _dataset_concat(
             "The elements in the input list need to be either all 'Dataset's or all 'DataArray's"
         )
 
+    dim_var: Variable | None
     if isinstance(dim, DataArray):
         dim_var = dim.variable
     elif isinstance(dim, Variable):
@@ -518,10 +615,10 @@ def _dataset_concat(
         )
     )
 
-    dim_coords, dims_sizes, coord_names, data_names, vars_order = _parse_datasets(
-        datasets
+    all_dims, dim_coords, dims_sizes, coord_names, data_names, vars_order = (
+        _parse_datasets(datasets)
     )
-    dim_names = set(dim_coords)
+    indexed_dim_names = set(dim_coords)
 
     both_data_and_coords = coord_names & data_names
     if both_data_and_coords:
@@ -535,19 +632,23 @@ def _dataset_concat(
     # case where concat dimension is a coordinate or data_var but not a dimension
     if (
         dim_name in coord_names or dim_name in data_names
-    ) and dim_name not in dim_names:
+    ) and dim_name not in indexed_dim_names:
         datasets = [
             ds.expand_dims(dim_name, create_index_for_new_dim=create_index_for_new_dim)
             for ds in datasets
         ]
+        all_dims.add(dim_name)
+        # This isn't being used any more, but keeping it up to date
+        # just in case we decide to use it later.
+        indexed_dim_names.add(dim_name)
 
     # determine which variables to concatenate
-    concat_over, equals, concat_dim_lengths = _calc_concat_over(
-        datasets, dim_name, dim_names, data_vars, coords, compat
+    concat_over, equals, concat_dim_lengths, skip_merge = _calc_concat_over(
+        datasets, dim_name, all_dims, data_vars, coords, compat
     )
 
     # determine which variables to merge, and then merge them according to compat
-    variables_to_merge = (coord_names | data_names) - concat_over
+    variables_to_merge = (coord_names | data_names) - concat_over - skip_merge
 
     result_vars = {}
     result_indexes = {}
@@ -657,7 +758,7 @@ def _dataset_concat(
                 else:
                     # index created from a scalar coordinate
                     idx_vars = {name: datasets[0][name].variable}
-                result_indexes.update({k: combined_idx for k in idx_vars})
+                result_indexes.update(dict.fromkeys(idx_vars, combined_idx))
                 combined_idx_vars = combined_idx.create_variables(idx_vars)
                 for k, v in combined_idx_vars.items():
                     v.attrs = merge_attrs(
@@ -718,14 +819,14 @@ def _dataset_concat(
 def _dataarray_concat(
     arrays: Iterable[T_DataArray],
     dim: str | T_Variable | T_DataArray | pd.Index,
-    data_vars: T_DataVars,
-    coords: str | list[str],
-    compat: CompatOptions,
+    data_vars: T_DataVars | CombineKwargDefault,
+    coords: ConcatOptions | Iterable[Hashable] | CombineKwargDefault,
+    compat: CompatOptions | CombineKwargDefault,
     positions: Iterable[Iterable[int]] | None,
-    fill_value: object = dtypes.NA,
-    join: JoinOptions = "outer",
-    combine_attrs: CombineAttrsOptions = "override",
-    create_index_for_new_dim: bool = True,
+    fill_value: object,
+    join: JoinOptions | CombineKwargDefault,
+    combine_attrs: CombineAttrsOptions,
+    create_index_for_new_dim: bool,
 ) -> T_DataArray:
     from xarray.core.dataarray import DataArray
 
@@ -736,7 +837,12 @@ def _dataarray_concat(
             "The elements in the input list need to be either all 'Dataset's or all 'DataArray's"
         )
 
-    if data_vars != "all":
+    # Allow passing `all` or `None` even though we always use `data_vars='all'`
+    # when passing off to `_dataset_concat`.
+    if not isinstance(data_vars, CombineKwargDefault) and data_vars not in [
+        "all",
+        None,
+    ]:
         raise ValueError(
             "data_vars is not a valid argument when concatenating DataArray objects"
         )
@@ -754,11 +860,11 @@ def _dataarray_concat(
 
     ds = _dataset_concat(
         datasets,
-        dim,
-        data_vars,
-        coords,
-        compat,
-        positions,
+        dim=dim,
+        data_vars="all",
+        coords=coords,
+        compat=compat,
+        positions=positions,
         fill_value=fill_value,
         join=join,
         combine_attrs=combine_attrs,
diff -pruN 2025.03.1-8/xarray/structure/merge.py 2025.10.1-1/xarray/structure/merge.py
--- 2025.03.1-8/xarray/structure/merge.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/structure/merge.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,7 +1,8 @@
 from __future__ import annotations
 
 from collections import defaultdict
-from collections.abc import Hashable, Iterable, Mapping, Sequence, Set
+from collections.abc import Hashable, Iterable, Mapping, Sequence
+from collections.abc import Set as AbstractSet
 from typing import TYPE_CHECKING, Any, NamedTuple, Union
 
 import pandas as pd
@@ -14,9 +15,20 @@ from xarray.core.indexes import (
     filter_indexes_from_coords,
     indexes_equal,
 )
-from xarray.core.utils import Frozen, compat_dict_union, dict_equiv, equivalent
+from xarray.core.utils import (
+    Frozen,
+    compat_dict_union,
+    dict_equiv,
+    emit_user_level_warning,
+    equivalent,
+)
 from xarray.core.variable import Variable, as_variable, calculate_dimensions
 from xarray.structure.alignment import deep_align
+from xarray.util.deprecation_helpers import (
+    _COMPAT_DEFAULT,
+    _JOIN_DEFAULT,
+    CombineKwargDefault,
+)
 
 if TYPE_CHECKING:
     from xarray.core.coordinates import Coordinates
@@ -88,9 +100,9 @@ class MergeError(ValueError):
 def unique_variable(
     name: Hashable,
     variables: list[Variable],
-    compat: CompatOptions = "broadcast_equals",
+    compat: CompatOptions | CombineKwargDefault = "broadcast_equals",
     equals: bool | None = None,
-) -> Variable:
+) -> tuple[bool | None, Variable]:
     """Return the unique variable from a list of variables or raise MergeError.
 
     Parameters
@@ -116,7 +128,7 @@ def unique_variable(
     out = variables[0]
 
     if len(variables) == 1 or compat == "override":
-        return out
+        return equals, out
 
     combine_method = None
 
@@ -130,18 +142,25 @@ def unique_variable(
     if compat == "no_conflicts":
         combine_method = "fillna"
 
+    # we return the lazy equals, so we can warn about behaviour changes
+    lazy_equals = equals
     if equals is None:
+        compat_str = (
+            compat._value if isinstance(compat, CombineKwargDefault) else compat
+        )
+        assert compat_str is not None
         # first check without comparing values i.e. no computes
         for var in variables[1:]:
-            equals = getattr(out, compat)(var, equiv=lazy_array_equiv)
+            equals = getattr(out, compat_str)(var, equiv=lazy_array_equiv)
             if equals is not True:
                 break
 
+        lazy_equals = equals
         if equals is None:
             # now compare values with minimum number of computes
             out = out.compute()
             for var in variables[1:]:
-                equals = getattr(out, compat)(var)
+                equals = getattr(out, compat_str)(var)
                 if not equals:
                     break
 
@@ -155,11 +174,11 @@ def unique_variable(
         for var in variables[1:]:
             out = getattr(out, combine_method)(var)
 
-    return out
+    return lazy_equals, out
 
 
 def _assert_compat_valid(compat):
-    if compat not in _VALID_COMPAT:
+    if not isinstance(compat, CombineKwargDefault) and compat not in _VALID_COMPAT:
         raise ValueError(f"compat={compat!r} invalid: must be {set(_VALID_COMPAT)}")
 
 
@@ -201,7 +220,7 @@ def _assert_prioritized_valid(
 def merge_collected(
     grouped: dict[Any, list[MergeElement]],
     prioritized: Mapping[Any, MergeElement] | None = None,
-    compat: CompatOptions = "minimal",
+    compat: CompatOptions | CombineKwargDefault = "minimal",
     combine_attrs: CombineAttrsOptions = "override",
     equals: dict[Any, bool] | None = None,
 ) -> tuple[dict[Hashable, Variable], dict[Hashable, Index]]:
@@ -257,6 +276,7 @@ def merge_collected(
             if index is not None:
                 merged_indexes[name] = index
         else:
+            attrs: dict[Any, Any] = {}
             indexed_elements = [
                 (variable, index)
                 for variable, index in elements_list
@@ -283,18 +303,33 @@ def merge_collected(
                                 "conflicting attribute values on combined "
                                 f"variable {name!r}:\nfirst value: {variable.attrs!r}\nsecond value: {other_variable.attrs!r}"
                             )
-                merged_vars[name] = variable
-                merged_vars[name].attrs = merge_attrs(
+                attrs = merge_attrs(
                     [var.attrs for var, _ in indexed_elements],
                     combine_attrs=combine_attrs,
                 )
+                merged_vars[name] = variable
                 merged_indexes[name] = index
             else:
                 variables = [variable for variable, _ in elements_list]
                 try:
-                    merged_vars[name] = unique_variable(
-                        name, variables, compat, equals.get(name, None)
+                    equals_this_var, merged_vars[name] = unique_variable(
+                        name, variables, compat, equals.get(name)
                     )
+                    # This is very likely to result in false positives, but there is no way
+                    # to tell if the output will change without computing.
+                    if (
+                        isinstance(compat, CombineKwargDefault)
+                        and compat == "no_conflicts"
+                        and len(variables) > 1
+                        and not equals_this_var
+                    ):
+                        emit_user_level_warning(
+                            compat.warning_message(
+                                "This is likely to lead to different results when "
+                                "combining overlapping variables with the same name.",
+                            ),
+                            FutureWarning,
+                        )
                 except MergeError:
                     if compat != "minimal":
                         # we need more than "minimal" compatibility (for which
@@ -302,10 +337,15 @@ def merge_collected(
                         raise
 
                 if name in merged_vars:
-                    merged_vars[name].attrs = merge_attrs(
+                    attrs = merge_attrs(
                         [var.attrs for var in variables], combine_attrs=combine_attrs
                     )
 
+            if name in merged_vars and (merged_vars[name].attrs or attrs):
+                # Ensure that assigning attrs does not affect the original input variable.
+                merged_vars[name] = merged_vars[name].copy(deep=False)
+                merged_vars[name].attrs = attrs
+
     return merged_vars, merged_indexes
 
 
@@ -365,7 +405,7 @@ def collect_variables_and_indexes(
                 append(name, variable, indexes[name])
             elif variable.dims == (name,):
                 idx, idx_vars = create_default_index_implicit(variable)
-                append_all(idx_vars, {k: idx for k in idx_vars})
+                append_all(idx_vars, dict.fromkeys(idx_vars, idx))
             else:
                 append(name, variable, None)
 
@@ -390,7 +430,7 @@ def collect_from_coordinates(
 def merge_coordinates_without_align(
     objects: list[Coordinates],
     prioritized: Mapping[Any, MergeElement] | None = None,
-    exclude_dims: Set = frozenset(),
+    exclude_dims: AbstractSet = frozenset(),
     combine_attrs: CombineAttrsOptions = "override",
 ) -> tuple[dict[Hashable, Variable], dict[Hashable, Index]]:
     """Merge variables/indexes from coordinates without automatic alignments.
@@ -499,7 +539,7 @@ def coerce_pandas_values(objects: Iterab
 def _get_priority_vars_and_indexes(
     objects: Sequence[DatasetLike],
     priority_arg: int | None,
-    compat: CompatOptions = "equals",
+    compat: CompatOptions | CombineKwargDefault = "equals",
 ) -> dict[Hashable, MergeElement]:
     """Extract the priority variable from a list of mappings.
 
@@ -567,6 +607,25 @@ def merge_coords(
     return variables, out_indexes
 
 
+def equivalent_attrs(a: Any, b: Any) -> bool:
+    """Check if two attribute values are equivalent.
+
+    Returns False if the comparison raises ValueError or TypeError.
+    This handles cases like numpy arrays with ambiguous truth values
+    and xarray Datasets which can't be directly converted to numpy arrays.
+
+    Since equivalent() now handles non-boolean returns by returning False,
+    this wrapper mainly catches exceptions from comparisons that can't be
+    evaluated at all.
+    """
+    try:
+        return equivalent(a, b)
+    except (ValueError, TypeError):
+        # These exceptions indicate the comparison is truly ambiguous
+        # (e.g., nested numpy arrays that would raise "ambiguous truth value")
+        return False
+
+
 def merge_attrs(variable_attrs, combine_attrs, context=None):
     """Combine attributes from different variables according to combine_attrs"""
     if not variable_attrs:
@@ -593,20 +652,18 @@ def merge_attrs(variable_attrs, combine_
     elif combine_attrs == "drop_conflicts":
         result = {}
         dropped_keys = set()
+
         for attrs in variable_attrs:
-            result.update(
-                {
-                    key: value
-                    for key, value in attrs.items()
-                    if key not in result and key not in dropped_keys
-                }
-            )
-            result = {
-                key: value
-                for key, value in result.items()
-                if key not in attrs or equivalent(attrs[key], value)
-            }
-            dropped_keys |= {key for key in attrs if key not in result}
+            for key, value in attrs.items():
+                if key in dropped_keys:
+                    continue
+
+                if key not in result:
+                    result[key] = value
+                elif not equivalent_attrs(result[key], value):
+                    del result[key]
+                    dropped_keys.add(key)
+
         return result
     elif combine_attrs == "identical":
         result = dict(variable_attrs[0])
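Seen from the public API, a quick sketch of what the rewritten `drop_conflicts` loop (together with the new `equivalent_attrs` wrapper above) does: agreeing attributes survive, conflicting ones are dropped, and array-valued attributes no longer trip up the comparison.

```python
import numpy as np
import xarray as xr

a = xr.Dataset(attrs={"units": "K", "history": "run-1", "grid": np.array([1, 2, 3])})
b = xr.Dataset(attrs={"units": "K", "history": "run-2", "grid": np.array([1, 2, 3])})

# "units" and the identical array-valued "grid" are kept; the conflicting
# "history" is dropped instead of raising.
print(xr.merge([a, b], combine_attrs="drop_conflicts").attrs)
```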
@@ -631,8 +688,8 @@ class _MergeResult(NamedTuple):
 
 def merge_core(
     objects: Iterable[CoercibleMapping],
-    compat: CompatOptions = "broadcast_equals",
-    join: JoinOptions = "outer",
+    compat: CompatOptions | CombineKwargDefault,
+    join: JoinOptions | CombineKwargDefault,
     combine_attrs: CombineAttrsOptions = "override",
     priority_arg: int | None = None,
     explicit_coords: Iterable[Hashable] | None = None,
@@ -691,7 +748,11 @@ def merge_core(
 
     coerced = coerce_pandas_values(objects)
     aligned = deep_align(
-        coerced, join=join, copy=False, indexes=indexes, fill_value=fill_value
+        coerced,
+        join=join,
+        copy=False,
+        indexes=indexes,
+        fill_value=fill_value,
     )
 
     for pos, obj in skip_align_objs:
@@ -700,7 +761,10 @@ def merge_core(
     collected = collect_variables_and_indexes(aligned, indexes=indexes)
     prioritized = _get_priority_vars_and_indexes(aligned, priority_arg, compat=compat)
     variables, out_indexes = merge_collected(
-        collected, prioritized, compat=compat, combine_attrs=combine_attrs
+        collected,
+        prioritized,
+        compat=compat,
+        combine_attrs=combine_attrs,
     )
 
     dims = calculate_dimensions(variables)
@@ -711,7 +775,7 @@ def merge_core(
         coord_names.intersection_update(variables)
     if explicit_coords is not None:
         coord_names.update(explicit_coords)
-    for dim in dims.keys():
+    for dim in dims:
         if dim in variables:
             coord_names.add(dim)
     ambiguous_coords = coord_names.intersection(noncoord_names)
@@ -731,8 +795,8 @@ def merge_core(
 
 def merge(
     objects: Iterable[DataArray | CoercibleMapping],
-    compat: CompatOptions = "no_conflicts",
-    join: JoinOptions = "outer",
+    compat: CompatOptions | CombineKwargDefault = _COMPAT_DEFAULT,
+    join: JoinOptions | CombineKwargDefault = _JOIN_DEFAULT,
     fill_value: object = dtypes.NA,
     combine_attrs: CombineAttrsOptions = "override",
 ) -> Dataset:
@@ -843,7 +907,7 @@ def merge(
       * time     (time) float64 16B 30.0 60.0
       * lon      (lon) float64 16B 100.0 150.0
 
-    >>> xr.merge([x, y, z])
+    >>> xr.merge([x, y, z], join="outer")
     <xarray.Dataset> Size: 256B
     Dimensions:  (lat: 3, lon: 3, time: 2)
     Coordinates:
@@ -855,7 +919,7 @@ def merge(
         var2     (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0
         var3     (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0
 
-    >>> xr.merge([x, y, z], compat="identical")
+    >>> xr.merge([x, y, z], compat="identical", join="outer")
     <xarray.Dataset> Size: 256B
     Dimensions:  (lat: 3, lon: 3, time: 2)
     Coordinates:
@@ -867,7 +931,7 @@ def merge(
         var2     (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0
         var3     (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0
 
-    >>> xr.merge([x, y, z], compat="equals")
+    >>> xr.merge([x, y, z], compat="equals", join="outer")
     <xarray.Dataset> Size: 256B
     Dimensions:  (lat: 3, lon: 3, time: 2)
     Coordinates:
@@ -879,7 +943,7 @@ def merge(
         var2     (lat, lon) float64 72B 5.0 nan 6.0 nan nan nan 7.0 nan 8.0
         var3     (time, lon) float64 48B 0.0 nan 3.0 4.0 nan 9.0
 
-    >>> xr.merge([x, y, z], compat="equals", fill_value=-999.0)
+    >>> xr.merge([x, y, z], compat="equals", join="outer", fill_value=-999.0)
     <xarray.Dataset> Size: 256B
     Dimensions:  (lat: 3, lon: 3, time: 2)
     Coordinates:
@@ -942,7 +1006,7 @@ def merge(
     >>> xr.merge([x, y, z], join="exact")
     Traceback (most recent call last):
     ...
-    ValueError: cannot align objects with join='exact' where ...
+    xarray.structure.alignment.AlignmentError: cannot align objects with join='exact' where ...
 
     Raises
     ------
@@ -976,8 +1040,8 @@ def merge(
 
     merge_result = merge_core(
         dict_like_objects,
-        compat,
-        join,
+        compat=compat,
+        join=join,
         combine_attrs=combine_attrs,
         fill_value=fill_value,
     )
@@ -988,8 +1052,8 @@ def dataset_merge_method(
     dataset: Dataset,
     other: CoercibleMapping,
     overwrite_vars: Hashable | Iterable[Hashable],
-    compat: CompatOptions,
-    join: JoinOptions,
+    compat: CompatOptions | CombineKwargDefault,
+    join: JoinOptions | CombineKwargDefault,
     fill_value: Any,
     combine_attrs: CombineAttrsOptions,
 ) -> _MergeResult:
@@ -1022,8 +1086,8 @@ def dataset_merge_method(
 
     return merge_core(
         objs,
-        compat,
-        join,
+        compat=compat,
+        join=join,
         priority_arg=priority_arg,
         fill_value=fill_value,
         combine_attrs=combine_attrs,
@@ -1055,6 +1119,8 @@ def dataset_update_method(dataset: Datas
 
     return merge_core(
         [dataset, other],
+        compat="broadcast_equals",
+        join="outer",
         priority_arg=1,
         indexes=dataset.xindexes,
         combine_attrs="override",
@@ -1076,6 +1142,7 @@ def merge_data_and_coords(data_vars: Dat
         [data_vars, coords],
         compat="broadcast_equals",
         join="outer",
+        combine_attrs="override",
         explicit_coords=tuple(coords),
         indexes=coords.xindexes,
         priority_arg=1,
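The updated doctest above shows the failure surfacing as `AlignmentError`. A short sketch of catching it, using the import path shown earlier in this diff; `AlignmentError` derives from `ValueError`, so pre-existing `except ValueError` handlers should still catch it.

```python
import xarray as xr
from xarray.structure.alignment import AlignmentError  # path used in this diff

x = xr.Dataset(coords={"lat": [35.0, 40.0]})
y = xr.Dataset(coords={"lat": [35.0, 42.0]})

try:
    xr.merge([x, y], join="exact")
except AlignmentError as err:
    print(type(err).__name__, err)
```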
diff -pruN 2025.03.1-8/xarray/testing/assertions.py 2025.10.1-1/xarray/testing/assertions.py
--- 2025.03.1-8/xarray/testing/assertions.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/testing/assertions.py	2025-10-10 10:38:05.000000000 +0000
@@ -12,6 +12,7 @@ from xarray.core.coordinates import Coor
 from xarray.core.dataarray import DataArray
 from xarray.core.dataset import Dataset
 from xarray.core.datatree import DataTree
+from xarray.core.datatree_mapping import map_over_datasets
 from xarray.core.formatting import diff_datatree_repr
 from xarray.core.indexes import Index, PandasIndex, PandasMultiIndex, default_indexes
 from xarray.core.variable import IndexVariable, Variable
@@ -85,14 +86,25 @@ def assert_isomorphic(a: DataTree, b: Da
 
 def maybe_transpose_dims(a, b, check_dim_order: bool):
     """Helper for assert_equal/allclose/identical"""
+
     __tracebackhide__ = True
-    if not isinstance(a, Variable | DataArray | Dataset):
+
+    def _maybe_transpose_dims(a, b):
+        if not isinstance(a, Variable | DataArray | Dataset):
+            return b
+        if set(a.dims) == set(b.dims):
+            # Ensure transpose won't fail if a dimension is missing
+            # If this is the case, the difference will be caught by the caller
+            return b.transpose(*a.dims)
+        return b
+
+    if check_dim_order:
         return b
-    if not check_dim_order and set(a.dims) == set(b.dims):
-        # Ensure transpose won't fail if a dimension is missing
-        # If this is the case, the difference will be caught by the caller
-        return b.transpose(*a.dims)
-    return b
+
+    if isinstance(a, DataTree):
+        return map_over_datasets(_maybe_transpose_dims, a, b)
+
+    return _maybe_transpose_dims(a, b)
 
 
 @ensure_warnings
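This helper backs the `check_dim_order` flag of the public assertion functions; a tiny sketch of that flag using a plain DataArray (the new branch above extends the same treatment to `DataTree` via `map_over_datasets`):

```python
import numpy as np
import xarray as xr

da = xr.DataArray(np.zeros((2, 3)), dims=("x", "y"))

# Same values, transposed dims: passes only when dimension order is ignored.
xr.testing.assert_equal(da, da.transpose("y", "x"), check_dim_order=False)
```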
@@ -240,6 +252,9 @@ def assert_allclose(
             a.variables, b.variables, compat=compat_variable
         )
         assert allclose, formatting.diff_dataset_repr(a, b, compat=equiv)
+    elif isinstance(a, Coordinates):
+        allclose = utils.dict_equiv(a.variables, b.variables, compat=compat_variable)
+        assert allclose, formatting.diff_coords_repr(a, b, compat=equiv)
     else:
         raise TypeError(f"{type(a)} not supported by assertion comparison")
 
@@ -330,10 +345,13 @@ def _assert_indexes_invariants_checks(
         k: type(v) for k, v in indexes.items()
     }
 
-    index_vars = {
-        k for k, v in possible_coord_variables.items() if isinstance(v, IndexVariable)
-    }
-    assert indexes.keys() <= index_vars, (set(indexes), index_vars)
+    if check_default:
+        index_vars = {
+            k
+            for k, v in possible_coord_variables.items()
+            if isinstance(v, IndexVariable)
+        }
+        assert indexes.keys() <= index_vars, (set(indexes), index_vars)
 
     # check pandas index wrappers vs. coordinate data adapters
     for k, index in indexes.items():
@@ -395,13 +413,18 @@ def _assert_dataarray_invariants(da: Dat
 
     assert isinstance(da._coords, dict), da._coords
     assert all(isinstance(v, Variable) for v in da._coords.values()), da._coords
-    assert all(set(v.dims) <= set(da.dims) for v in da._coords.values()), (
-        da.dims,
-        {k: v.dims for k, v in da._coords.items()},
-    )
-    assert all(
-        isinstance(v, IndexVariable) for (k, v) in da._coords.items() if v.dims == (k,)
-    ), {k: type(v) for k, v in da._coords.items()}
+
+    if check_default_indexes:
+        assert all(set(v.dims) <= set(da.dims) for v in da._coords.values()), (
+            da.dims,
+            {k: v.dims for k, v in da._coords.items()},
+        )
+        assert all(
+            isinstance(v, IndexVariable)
+            for (k, v) in da._coords.items()
+            if v.dims == (k,)
+        ), {k: type(v) for k, v in da._coords.items()}
+
     for k, v in da._coords.items():
         _assert_variable_invariants(v, k)
 
diff -pruN 2025.03.1-8/xarray/testing/strategies.py 2025.10.1-1/xarray/testing/strategies.py
--- 2025.03.1-8/xarray/testing/strategies.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/testing/strategies.py	2025-10-10 10:38:05.000000000 +0000
@@ -479,6 +479,7 @@ def unique_subset_of(
 
 class CFTimeStrategy(st.SearchStrategy):
     def __init__(self, min_value, max_value):
+        super().__init__()
         self.min_value = min_value
         self.max_value = max_value
 
@@ -495,6 +496,7 @@ class CFTimeStrategyISO8601(st.SearchStr
     def __init__(self):
         from xarray.tests.test_coding_times import _all_cftime_date_types
 
+        super().__init__()
         self.date_types = _all_cftime_date_types()
         self.calendars = list(self.date_types)
 
diff -pruN 2025.03.1-8/xarray/tests/CLAUDE.md 2025.10.1-1/xarray/tests/CLAUDE.md
--- 2025.03.1-8/xarray/tests/CLAUDE.md	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/xarray/tests/CLAUDE.md	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,132 @@
+# Testing Guidelines for xarray
+
+## Handling Optional Dependencies
+
+xarray has many optional dependencies that may not be available in all testing environments. Always use the standard decorators and patterns when writing tests that require specific dependencies.
+
+### Standard Decorators
+
+**ALWAYS use decorators** like `@requires_dask`, `@requires_cftime`, etc. instead of conditional `if` statements.
+
+All available decorators are defined in `xarray/tests/__init__.py` (look for `requires_*` decorators).
+
+### DO NOT use conditional imports or skipif
+
+❌ **WRONG - Do not do this:**
+
+```python
+def test_mean_with_cftime():
+    if has_dask:  # WRONG!
+        ds = ds.chunk({})
+        result = ds.mean()
+```
+
+❌ **ALSO WRONG - Avoid pytest.mark.skipif in parametrize:**
+
+```python
+@pytest.mark.parametrize(
+    "chunk",
+    [
+        pytest.param(
+            True, marks=pytest.mark.skipif(not has_dask, reason="requires dask")
+        ),
+        False,
+    ],
+)
+def test_something(chunk): ...
+```
+
+✅ **CORRECT - Do this instead:**
+
+```python
+def test_mean_with_cftime():
+    # Test without dask
+    result = ds.mean()
+
+
+@requires_dask
+def test_mean_with_cftime_dask():
+    # Separate test for dask functionality
+    ds = ds.chunk({})
+    result = ds.mean()
+```
+
+✅ **OR for parametrized tests, split them:**
+
+```python
+def test_something_without_dask():
+    # Test the False case
+    ...
+
+
+@requires_dask
+def test_something_with_dask():
+    # Test the True case with dask
+    ...
+```
+
+### Multiple dependencies
+
+When a test requires multiple optional dependencies:
+
+```python
+@requires_dask
+@requires_scipy
+def test_interpolation_with_dask(): ...
+```
+
+### Importing optional dependencies in tests
+
+For imports within test functions, use `pytest.importorskip`:
+
+```python
+def test_cftime_functionality():
+    cftime = pytest.importorskip("cftime")
+    # Now use cftime
+```
+
+### Common patterns
+
+1. **Split tests by dependency** - Don't mix optional dependency code with base functionality:
+
+   ```python
+   def test_base_functionality():
+       # Core test without optional deps
+       result = ds.mean()
+       assert result is not None
+
+
+   @requires_dask
+   def test_dask_functionality():
+       # Dask-specific test
+       ds_chunked = ds.chunk({})
+       result = ds_chunked.mean()
+       assert result is not None
+   ```
+
+2. **Use fixtures for dependency-specific setup**:
+
+   ```python
+   @pytest.fixture
+   def dask_array():
+       pytest.importorskip("dask.array")
+       import dask.array as da
+
+       return da.from_array([1, 2, 3], chunks=2)
+   ```
+
+3. **Check available implementations**:
+
+   ```python
+   from xarray.core.duck_array_ops import available_implementations
+
+
+   @pytest.mark.parametrize("implementation", available_implementations())
+   def test_with_available_backends(implementation): ...
+   ```
+
+### Key Points
+
+- CI environments intentionally exclude certain dependencies (e.g., `all-but-dask`, `bare-minimum`)
+- A test failing in "all-but-dask" because it uses dask is a test bug, not a CI issue
+- Look at similar existing tests for patterns to follow
diff -pruN 2025.03.1-8/xarray/tests/__init__.py 2025.10.1-1/xarray/tests/__init__.py
--- 2025.03.1-8/xarray/tests/__init__.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/__init__.py	2025-10-10 10:38:05.000000000 +0000
@@ -48,6 +48,7 @@ except ImportError:
 warnings.filterwarnings("ignore", "'urllib3.contrib.pyopenssl' module is deprecated")
 warnings.filterwarnings("ignore", "Deprecated call to `pkg_resources.declare_namespace")
 warnings.filterwarnings("ignore", "pkg_resources is deprecated as an API")
+warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
 
 arm_xfail = pytest.mark.xfail(
     platform.machine() == "aarch64" or "arm" in platform.machine(),
@@ -60,7 +61,9 @@ def assert_writeable(ds):
         name
         for name, var in ds.variables.items()
         if not isinstance(var, IndexVariable)
-        and not isinstance(var.data, PandasExtensionArray)
+        and not isinstance(
+            var.data, PandasExtensionArray | pd.api.extensions.ExtensionArray
+        )
         and not var.data.flags.writeable
     ]
     assert not readonly, readonly
@@ -128,6 +131,26 @@ has_bottleneck, requires_bottleneck = _i
 has_rasterio, requires_rasterio = _importorskip("rasterio")
 has_zarr, requires_zarr = _importorskip("zarr")
 has_zarr_v3, requires_zarr_v3 = _importorskip("zarr", "3.0.0")
+has_zarr_v3_dtypes, requires_zarr_v3_dtypes = _importorskip("zarr", "3.1.0")
+has_zarr_v3_async_oindex, requires_zarr_v3_async_oindex = _importorskip("zarr", "3.1.2")
+if has_zarr_v3:
+    import zarr
+
+    # manual update by checking attrs for now
+    # TODO: use version specifier
+    # installing from git main is giving me a lower version than the
+    # most recently released zarr
+    has_zarr_v3_dtypes = hasattr(zarr.core, "dtype")
+    has_zarr_v3_async_oindex = hasattr(zarr.AsyncArray, "oindex")
+
+    requires_zarr_v3_dtypes = pytest.mark.skipif(
+        not has_zarr_v3_dtypes, reason="requires zarr>3.1.0"
+    )
+    requires_zarr_v3_async_oindex = pytest.mark.skipif(
+        not has_zarr_v3_async_oindex, reason="requires zarr>3.1.1"
+    )
+
+
 has_fsspec, requires_fsspec = _importorskip("fsspec")
 has_iris, requires_iris = _importorskip("iris")
 has_numbagg, requires_numbagg = _importorskip("numbagg")
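The `has_*`/`requires_*` pairs above come from a small helper in this module; a rough, illustrative sketch of that pattern (names and details are assumptions, not the actual `_importorskip`):

```python
# Illustrative sketch of the has_*/requires_* pattern, not the real helper.
import importlib

import pytest


def _importorskip_sketch(modname: str, minversion: str | None = None):
    try:
        mod = importlib.import_module(modname)
        has = True
        if minversion is not None:
            from packaging.version import Version

            has = Version(getattr(mod, "__version__", "0")) >= Version(minversion)
    except ImportError:
        has = False
    marker = pytest.mark.skipif(not has, reason=f"requires {modname}")
    return has, marker
```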
@@ -156,6 +179,10 @@ has_scipy_or_netCDF4 = has_scipy or has_
 requires_scipy_or_netCDF4 = pytest.mark.skipif(
     not has_scipy_or_netCDF4, reason="requires scipy or netCDF4"
 )
+has_h5netcdf_or_netCDF4 = has_h5netcdf or has_netCDF4
+requires_h5netcdf_or_netCDF4 = pytest.mark.skipif(
+    not has_h5netcdf_or_netCDF4, reason="requires h5netcdf or netCDF4"
+)
 has_numbagg_or_bottleneck = has_numbagg or has_bottleneck
 requires_numbagg_or_bottleneck = pytest.mark.skipif(
     not has_numbagg_or_bottleneck, reason="requires numbagg or bottleneck"
@@ -361,6 +388,14 @@ def create_test_data(
                 )
             ),
         )
+        if has_pyarrow:
+            obj["var5"] = (
+                "dim1",
+                pd.array(
+                    rs.integers(1, 10, size=dim_sizes[0]).tolist(),
+                    dtype="int64[pyarrow]",
+                ),
+            )
     if dim_sizes == _DEFAULT_TEST_DIM_SIZES:
         numbers_values = np.array([0, 1, 2, 0, 0, 1, 1, 2, 2, 3], dtype="int64")
     else:
@@ -384,7 +419,10 @@ _NON_STANDARD_CALENDARS = [
     pytest.param(cal, marks=requires_cftime)
     for cal in sorted(_NON_STANDARD_CALENDAR_NAMES)
 ]
-_STANDARD_CALENDARS = [pytest.param(cal) for cal in _STANDARD_CALENDAR_NAMES]
+_STANDARD_CALENDARS = [
+    pytest.param(cal, marks=requires_cftime if cal != "standard" else ())
+    for cal in _STANDARD_CALENDAR_NAMES
+]
 _ALL_CALENDARS = sorted(_STANDARD_CALENDARS + _NON_STANDARD_CALENDARS)
 _CFTIME_CALENDARS = [
     pytest.param(*p.values, marks=requires_cftime) for p in _ALL_CALENDARS
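
As a hedged usage sketch (the test name is hypothetical), the feature-detection markers defined above are applied to tests exactly like the older `_importorskip`-based ones:

from xarray.tests import requires_zarr_v3_async_oindex


@requires_zarr_v3_async_oindex  # skipped unless zarr.AsyncArray exposes an `oindex` attribute
def test_something_with_async_oindex() -> None:
    import zarr

    assert hasattr(zarr.AsyncArray, "oindex")
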
diff -pruN 2025.03.1-8/xarray/tests/arrays.py 2025.10.1-1/xarray/tests/arrays.py
--- 2025.03.1-8/xarray/tests/arrays.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/arrays.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,15 +1,15 @@
+"""
+This module contains various lazy array classes which can be wrapped and manipulated by xarray objects but will raise on data access.
+"""
+
 from collections.abc import Callable, Iterable
-from typing import Any
+from typing import Any, Self
 
 import numpy as np
 
 from xarray.core import utils
 from xarray.core.indexing import ExplicitlyIndexed
 
-"""
-This module contains various lazy array classes which can be wrapped and manipulated by xarray objects but will raise on data access.
-"""
-
 
 class UnexpectedDataAccess(Exception):
     pass
@@ -25,7 +25,7 @@ class InaccessibleArray(utils.NDArrayMix
         raise UnexpectedDataAccess("Tried accessing data")
 
     def __array__(
-        self, dtype: np.typing.DTypeLike = None, /, *, copy: bool | None = None
+        self, dtype: np.typing.DTypeLike | None = None, /, *, copy: bool | None = None
     ) -> np.ndarray:
         raise UnexpectedDataAccess("Tried accessing data")
 
@@ -56,7 +56,7 @@ class DuckArrayWrapper(utils.NDArrayMixi
         return self.array
 
     def __array__(
-        self, dtype: np.typing.DTypeLike = None, /, *, copy: bool | None = None
+        self, dtype: np.typing.DTypeLike | None = None, /, *, copy: bool | None = None
     ) -> np.ndarray:
         raise UnexpectedDataAccess("Tried accessing data")
 
@@ -126,6 +126,23 @@ def broadcast_to(
     return ConcatenatableArray(result)
 
 
+@implements(np.full_like)
+def full_like(
+    x: "ConcatenatableArray", /, fill_value, **kwargs
+) -> "ConcatenatableArray":
+    """
+    Broadcasts an array to a specified shape, by either manipulating chunk keys or copying chunk manifest entries.
+    """
+    if not isinstance(x, ConcatenatableArray):
+        raise TypeError
+    return ConcatenatableArray(np.full(x.shape, fill_value=fill_value, **kwargs))
+
+
+@implements(np.all)
+def numpy_all(x: "ConcatenatableArray", **kwargs) -> "ConcatenatableArray":
+    return type(x)(np.all(x._array, **kwargs))
+
+
 class ConcatenatableArray:
     """Disallows loading or coercing to an index but does support concatenation / stacking."""
 
@@ -152,11 +169,11 @@ class ConcatenatableArray:
         raise UnexpectedDataAccess("Tried accessing data")
 
     def __array__(
-        self, dtype: np.typing.DTypeLike = None, /, *, copy: bool | None = None
+        self, dtype: np.typing.DTypeLike | None = None, /, *, copy: bool | None = None
     ) -> np.ndarray:
         raise UnexpectedDataAccess("Tried accessing data")
 
-    def __getitem__(self, key) -> "ConcatenatableArray":
+    def __getitem__(self, key) -> Self:
         """Some cases of concat require supporting expanding dims by dimensions of size 1"""
         # see https://data-apis.org/array-api/2022.12/API_specification/indexing.html#multi-axis-indexing
         arr = self._array
@@ -167,7 +184,10 @@ class ConcatenatableArray:
                 pass
             else:
                 raise UnexpectedDataAccess("Tried accessing data.")
-        return ConcatenatableArray(arr)
+        return type(self)(arr)
+
+    def __eq__(self, other: Self) -> Self:  # type: ignore[override]
+        return type(self)(self._array == other._array)
 
     def __array_function__(self, func, types, args, kwargs) -> Any:
         if func not in CONCATENATABLEARRAY_HANDLED_ARRAY_FUNCTIONS:
@@ -184,9 +204,15 @@ class ConcatenatableArray:
         """We have to define this in order to convince xarray that this class is a duckarray, even though we will never support ufuncs."""
         return NotImplemented
 
-    def astype(self, dtype: np.dtype, /, *, copy: bool = True) -> "ConcatenatableArray":
+    def astype(self, dtype: np.dtype, /, *, copy: bool = True) -> Self:
         """Needed because xarray will call this even when it's a no-op"""
         if dtype != self.dtype:
             raise NotImplementedError()
         else:
             return self
+
+    def __and__(self, other: Self) -> Self:
+        return type(self)(self._array & other._array)
+
+    def __or__(self, other: Self) -> Self:
+        return type(self)(self._array | other._array)
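
For context on the dispatch used above: `implements` and `CONCATENATABLEARRAY_HANDLED_ARRAY_FUNCTIONS` are defined earlier in `arrays.py`, outside these hunks. They follow the usual NEP 18 registration pattern, roughly as in this sketch (details may differ from the real module):

CONCATENATABLEARRAY_HANDLED_ARRAY_FUNCTIONS: dict = {}


def implements(numpy_function):
    # Register an override so that ConcatenatableArray.__array_function__ can
    # look `numpy_function` up in this dict instead of coercing to np.ndarray.
    def decorator(func):
        CONCATENATABLEARRAY_HANDLED_ARRAY_FUNCTIONS[numpy_function] = func
        return func

    return decorator
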
diff -pruN 2025.03.1-8/xarray/tests/conftest.py 2025.10.1-1/xarray/tests/conftest.py
--- 2025.03.1-8/xarray/tests/conftest.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/conftest.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,5 +1,7 @@
 from __future__ import annotations
 
+import warnings
+
 import numpy as np
 import pandas as pd
 import pytest
@@ -9,6 +11,27 @@ from xarray import DataArray, Dataset, D
 from xarray.tests import create_test_data, has_cftime, requires_dask
 
 
+@pytest.fixture(autouse=True)
+def handle_numpy_1_warnings():
+    """Handle NumPy 1.x DeprecationWarnings for out-of-bound integer conversions.
+
+    NumPy 1.x raises DeprecationWarning when converting out-of-bounds values
+    (e.g., 255 to int8), while NumPy 2.x raises OverflowError. This fixture
+    suppresses the warning in NumPy 1.x environments to allow tests to pass.
+    """
+    # Only apply for NumPy < 2.0
+    if np.__version__.startswith("1."):
+        with warnings.catch_warnings():
+            warnings.filterwarnings(
+                "ignore",
+                "NumPy will stop allowing conversion of out-of-bound Python integers",
+                DeprecationWarning,
+            )
+            yield
+    else:
+        yield
+
+
 @pytest.fixture(params=["numpy", pytest.param("dask", marks=requires_dask)])
 def backend(request):
     return request.param
@@ -234,6 +257,6 @@ def simple_datatree(create_test_datatree
     return create_test_datatree()
 
 
-@pytest.fixture(scope="module", params=["s", "ms", "us", "ns"])
+@pytest.fixture(params=["s", "ms", "us", "ns"])
 def time_unit(request):
     return request.param
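
To make the version split handled by the autouse fixture above concrete (the value 255 is chosen purely for illustration), converting an out-of-range Python integer behaves differently across NumPy major versions:

import numpy as np

try:
    x = np.int8(255)  # NumPy 1.24+: DeprecationWarning, value wraps to -1
except OverflowError:
    x = None  # NumPy 2.x: OverflowError is raised instead
print(np.__version__, x)
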
diff -pruN 2025.03.1-8/xarray/tests/indexes.py 2025.10.1-1/xarray/tests/indexes.py
--- 2025.03.1-8/xarray/tests/indexes.py	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/xarray/tests/indexes.py	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,73 @@
+from collections.abc import Hashable, Iterable, Mapping, Sequence
+from typing import Any
+
+import numpy as np
+
+from xarray import Variable
+from xarray.core.indexes import Index, PandasIndex
+from xarray.core.types import Self
+
+
+class ScalarIndex(Index):
+    def __init__(self, value: int):
+        self.value = value
+
+    @classmethod
+    def from_variables(cls, variables, *, options) -> Self:
+        var = next(iter(variables.values()))
+        return cls(int(var.values))
+
+    def equals(self, other, *, exclude=None) -> bool:
+        return isinstance(other, ScalarIndex) and other.value == self.value
+
+
+class XYIndex(Index):
+    def __init__(self, x: PandasIndex, y: PandasIndex):
+        self.x: PandasIndex = x
+        self.y: PandasIndex = y
+
+    @classmethod
+    def from_variables(cls, variables, *, options):
+        return cls(
+            x=PandasIndex.from_variables({"x": variables["x"]}, options=options),
+            y=PandasIndex.from_variables({"y": variables["y"]}, options=options),
+        )
+
+    def create_variables(
+        self, variables: Mapping[Any, Variable] | None = None
+    ) -> dict[Any, Variable]:
+        return self.x.create_variables() | self.y.create_variables()
+
+    def equals(self, other, exclude=None):
+        if exclude is None:
+            exclude = frozenset()
+        x_eq = True if self.x.dim in exclude else self.x.equals(other.x)
+        y_eq = True if self.y.dim in exclude else self.y.equals(other.y)
+        return x_eq and y_eq
+
+    @classmethod
+    def concat(
+        cls,
+        indexes: Sequence[Self],
+        dim: Hashable,
+        positions: Iterable[Iterable[int]] | None = None,
+    ) -> Self:
+        first = next(iter(indexes))
+        if dim == "x":
+            newx = PandasIndex.concat(
+                tuple(i.x for i in indexes), dim=dim, positions=positions
+            )
+            newy = first.y
+        elif dim == "y":
+            newx = first.x
+            newy = PandasIndex.concat(
+                tuple(i.y for i in indexes), dim=dim, positions=positions
+            )
+        return cls(x=newx, y=newy)
+
+    def isel(self, indexers: Mapping[Any, int | slice | np.ndarray | Variable]) -> Self:
+        newx = self.x.isel({"x": indexers.get("x", slice(None))})
+        newy = self.y.isel({"y": indexers.get("y", slice(None))})
+        assert newx is not None
+        assert newy is not None
+        return type(self)(newx, newy)
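
A usage sketch for the test index above (the dataset contents are illustrative): a custom `Index` such as `XYIndex` is attached via `Dataset.set_xindex` after dropping the default indexes, at which point selection and concatenation route through the methods defined in this module:

import numpy as np
import xarray as xr

from xarray.tests.indexes import XYIndex

ds = xr.Dataset(coords={"x": np.arange(3), "y": np.arange(4)})
ds = ds.drop_indexes(["x", "y"])         # remove the default PandasIndex per coordinate
ds = ds.set_xindex(["x", "y"], XYIndex)  # one joint index over both dimensions
subset = ds.isel(x=slice(1, 3))          # dispatches to XYIndex.isel
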
diff -pruN 2025.03.1-8/xarray/tests/test_accessor_dt.py 2025.10.1-1/xarray/tests/test_accessor_dt.py
--- 2025.03.1-8/xarray/tests/test_accessor_dt.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_accessor_dt.py	2025-10-10 10:38:05.000000000 +0000
@@ -400,14 +400,14 @@ def calendar(request):
     return request.param
 
 
-@pytest.fixture()
+@pytest.fixture
 def cftime_date_type(calendar):
     if calendar == "standard":
         calendar = "proleptic_gregorian"
     return _all_cftime_date_types()[calendar]
 
 
-@pytest.fixture()
+@pytest.fixture
 def times(calendar):
     import cftime
 
@@ -419,7 +419,7 @@ def times(calendar):
     )
 
 
-@pytest.fixture()
+@pytest.fixture
 def data(times):
     data = np.random.rand(10, 10, _NT)
     lons = np.linspace(0, 11, 10)
@@ -429,7 +429,7 @@ def data(times):
     )
 
 
-@pytest.fixture()
+@pytest.fixture
 def times_3d(times):
     lons = np.linspace(0, 11, 10)
     lats = np.linspace(0, 20, 10)
diff -pruN 2025.03.1-8/xarray/tests/test_accessor_str.py 2025.10.1-1/xarray/tests/test_accessor_str.py
--- 2025.03.1-8/xarray/tests/test_accessor_str.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_accessor_str.py	2025-10-10 10:38:05.000000000 +0000
@@ -139,7 +139,7 @@ def test_contains(dtype) -> None:
     pat_re = re.compile("(/w+)")
     with pytest.raises(
         ValueError,
-        match="Must use regular expression matching for regular expression object.",
+        match=r"Must use regular expression matching for regular expression object.",
     ):
         values.str.contains(pat_re, regex=False)
 
@@ -482,17 +482,17 @@ def test_replace_compiled_regex(dtype) -
     pat3 = re.compile(dtype("BAD[_]*"))
 
     with pytest.raises(
-        ValueError, match="Flags cannot be set when pat is a compiled regex."
+        ValueError, match=r"Flags cannot be set when pat is a compiled regex."
     ):
         result = values.str.replace(pat3, "", flags=re.IGNORECASE)
 
     with pytest.raises(
-        ValueError, match="Case cannot be set when pat is a compiled regex."
+        ValueError, match=r"Case cannot be set when pat is a compiled regex."
     ):
         result = values.str.replace(pat3, "", case=False)
 
     with pytest.raises(
-        ValueError, match="Case cannot be set when pat is a compiled regex."
+        ValueError, match=r"Case cannot be set when pat is a compiled regex."
     ):
         result = values.str.replace(pat3, "", case=True)
 
@@ -555,22 +555,22 @@ def test_extract_extractall_findall_empt
 
     value = xr.DataArray([["a"]], dims=["X", "Y"]).astype(dtype)
 
-    with pytest.raises(ValueError, match="No capture groups found in pattern."):
+    with pytest.raises(ValueError, match=r"No capture groups found in pattern."):
         value.str.extract(pat=pat_str, dim="ZZ")
 
-    with pytest.raises(ValueError, match="No capture groups found in pattern."):
+    with pytest.raises(ValueError, match=r"No capture groups found in pattern."):
         value.str.extract(pat=pat_re, dim="ZZ")
 
-    with pytest.raises(ValueError, match="No capture groups found in pattern."):
+    with pytest.raises(ValueError, match=r"No capture groups found in pattern."):
         value.str.extractall(pat=pat_str, group_dim="XX", match_dim="YY")
 
-    with pytest.raises(ValueError, match="No capture groups found in pattern."):
+    with pytest.raises(ValueError, match=r"No capture groups found in pattern."):
         value.str.extractall(pat=pat_re, group_dim="XX", match_dim="YY")
 
-    with pytest.raises(ValueError, match="No capture groups found in pattern."):
+    with pytest.raises(ValueError, match=r"No capture groups found in pattern."):
         value.str.findall(pat=pat_str)
 
-    with pytest.raises(ValueError, match="No capture groups found in pattern."):
+    with pytest.raises(ValueError, match=r"No capture groups found in pattern."):
         value.str.findall(pat=pat_re)
 
 
@@ -582,13 +582,13 @@ def test_extract_multi_None_raises(dtype
 
     with pytest.raises(
         ValueError,
-        match="Dimension must be specified if more than one capture group is given.",
+        match=r"Dimension must be specified if more than one capture group is given.",
     ):
         value.str.extract(pat=pat_str, dim=None)
 
     with pytest.raises(
         ValueError,
-        match="Dimension must be specified if more than one capture group is given.",
+        match=r"Dimension must be specified if more than one capture group is given.",
     ):
         value.str.extract(pat=pat_re, dim=None)
 
@@ -600,32 +600,32 @@ def test_extract_extractall_findall_case
     value = xr.DataArray([["a"]], dims=["X", "Y"]).astype(dtype)
 
     with pytest.raises(
-        ValueError, match="Case cannot be set when pat is a compiled regex."
+        ValueError, match=r"Case cannot be set when pat is a compiled regex."
     ):
         value.str.extract(pat=pat_re, case=True, dim="ZZ")
 
     with pytest.raises(
-        ValueError, match="Case cannot be set when pat is a compiled regex."
+        ValueError, match=r"Case cannot be set when pat is a compiled regex."
     ):
         value.str.extract(pat=pat_re, case=False, dim="ZZ")
 
     with pytest.raises(
-        ValueError, match="Case cannot be set when pat is a compiled regex."
+        ValueError, match=r"Case cannot be set when pat is a compiled regex."
     ):
         value.str.extractall(pat=pat_re, case=True, group_dim="XX", match_dim="YY")
 
     with pytest.raises(
-        ValueError, match="Case cannot be set when pat is a compiled regex."
+        ValueError, match=r"Case cannot be set when pat is a compiled regex."
     ):
         value.str.extractall(pat=pat_re, case=False, group_dim="XX", match_dim="YY")
 
     with pytest.raises(
-        ValueError, match="Case cannot be set when pat is a compiled regex."
+        ValueError, match=r"Case cannot be set when pat is a compiled regex."
     ):
         value.str.findall(pat=pat_re, case=True)
 
     with pytest.raises(
-        ValueError, match="Case cannot be set when pat is a compiled regex."
+        ValueError, match=r"Case cannot be set when pat is a compiled regex."
     ):
         value.str.findall(pat=pat_re, case=False)
 
@@ -636,39 +636,39 @@ def test_extract_extractall_name_collisi
 
     value = xr.DataArray([["a"]], dims=["X", "Y"]).astype(dtype)
 
-    with pytest.raises(KeyError, match="Dimension 'X' already present in DataArray."):
+    with pytest.raises(KeyError, match=r"Dimension 'X' already present in DataArray."):
         value.str.extract(pat=pat_str, dim="X")
 
-    with pytest.raises(KeyError, match="Dimension 'X' already present in DataArray."):
+    with pytest.raises(KeyError, match=r"Dimension 'X' already present in DataArray."):
         value.str.extract(pat=pat_re, dim="X")
 
     with pytest.raises(
-        KeyError, match="Group dimension 'X' already present in DataArray."
+        KeyError, match=r"Group dimension 'X' already present in DataArray."
     ):
         value.str.extractall(pat=pat_str, group_dim="X", match_dim="ZZ")
 
     with pytest.raises(
-        KeyError, match="Group dimension 'X' already present in DataArray."
+        KeyError, match=r"Group dimension 'X' already present in DataArray."
     ):
         value.str.extractall(pat=pat_re, group_dim="X", match_dim="YY")
 
     with pytest.raises(
-        KeyError, match="Match dimension 'Y' already present in DataArray."
+        KeyError, match=r"Match dimension 'Y' already present in DataArray."
     ):
         value.str.extractall(pat=pat_str, group_dim="XX", match_dim="Y")
 
     with pytest.raises(
-        KeyError, match="Match dimension 'Y' already present in DataArray."
+        KeyError, match=r"Match dimension 'Y' already present in DataArray."
     ):
         value.str.extractall(pat=pat_re, group_dim="XX", match_dim="Y")
 
     with pytest.raises(
-        KeyError, match="Group dimension 'ZZ' is the same as match dimension 'ZZ'."
+        KeyError, match=r"Group dimension 'ZZ' is the same as match dimension 'ZZ'."
     ):
         value.str.extractall(pat=pat_str, group_dim="ZZ", match_dim="ZZ")
 
     with pytest.raises(
-        KeyError, match="Group dimension 'ZZ' is the same as match dimension 'ZZ'."
+        KeyError, match=r"Group dimension 'ZZ' is the same as match dimension 'ZZ'."
     ):
         value.str.extractall(pat=pat_re, group_dim="ZZ", match_dim="ZZ")
 
@@ -906,7 +906,7 @@ def test_extractall_single_single_nocase
     pat_re: str | bytes = (
         pat_str if dtype == np.str_ else bytes(pat_str, encoding="UTF-8")
     )
-    pat_compiled = re.compile(pat_re, flags=re.I)
+    pat_compiled = re.compile(pat_re, flags=re.IGNORECASE)
 
     value = xr.DataArray(
         [["a_Xy_0", "ab_xY_10", "abc_Xy_01"], ["abcd_Xy_", "", "abcdef_Xy_101"]],
@@ -981,7 +981,7 @@ def test_extractall_single_multi_nocase(
     pat_re: str | bytes = (
         pat_str if dtype == np.str_ else bytes(pat_str, encoding="UTF-8")
     )
-    pat_compiled = re.compile(pat_re, flags=re.I)
+    pat_compiled = re.compile(pat_re, flags=re.IGNORECASE)
 
     value = xr.DataArray(
         [
@@ -1063,7 +1063,7 @@ def test_extractall_multi_single_nocase(
     pat_re: str | bytes = (
         pat_str if dtype == np.str_ else bytes(pat_str, encoding="UTF-8")
     )
-    pat_compiled = re.compile(pat_re, flags=re.I)
+    pat_compiled = re.compile(pat_re, flags=re.IGNORECASE)
 
     value = xr.DataArray(
         [["a_Xy_0", "ab_xY_10", "abc_Xy_01"], ["abcd_Xy_", "", "abcdef_Xy_101"]],
@@ -1145,7 +1145,7 @@ def test_extractall_multi_multi_nocase(d
     pat_re: str | bytes = (
         pat_str if dtype == np.str_ else bytes(pat_str, encoding="UTF-8")
     )
-    pat_compiled = re.compile(pat_re, flags=re.I)
+    pat_compiled = re.compile(pat_re, flags=re.IGNORECASE)
 
     value = xr.DataArray(
         [
@@ -1245,7 +1245,7 @@ def test_findall_single_single_case(dtyp
 
 def test_findall_single_single_nocase(dtype) -> None:
     pat_str = r"(\w+)_Xy_\d*"
-    pat_re = re.compile(dtype(pat_str), flags=re.I)
+    pat_re = re.compile(dtype(pat_str), flags=re.IGNORECASE)
 
     value = xr.DataArray(
         [["a_Xy_0", "ab_xY_10", "abc_Xy_01"], ["abcd_Xy_", "", "abcdef_Xy_101"]],
@@ -1313,7 +1313,7 @@ def test_findall_single_multi_case(dtype
 
 def test_findall_single_multi_nocase(dtype) -> None:
     pat_str = r"(\w+)_Xy_\d*"
-    pat_re = re.compile(dtype(pat_str), flags=re.I)
+    pat_re = re.compile(dtype(pat_str), flags=re.IGNORECASE)
 
     value = xr.DataArray(
         [
@@ -1387,7 +1387,7 @@ def test_findall_multi_single_case(dtype
 
 def test_findall_multi_single_nocase(dtype) -> None:
     pat_str = r"(\w+)_Xy_(\d*)"
-    pat_re = re.compile(dtype(pat_str), flags=re.I)
+    pat_re = re.compile(dtype(pat_str), flags=re.IGNORECASE)
 
     value = xr.DataArray(
         [["a_Xy_0", "ab_xY_10", "abc_Xy_01"], ["abcd_Xy_", "", "abcdef_Xy_101"]],
@@ -1463,7 +1463,7 @@ def test_findall_multi_multi_case(dtype)
 
 def test_findall_multi_multi_nocase(dtype) -> None:
     pat_str = r"(\w+)_Xy_(\d*)"
-    pat_re = re.compile(dtype(pat_str), flags=re.I)
+    pat_re = re.compile(dtype(pat_str), flags=re.IGNORECASE)
 
     value = xr.DataArray(
         [
@@ -3526,7 +3526,7 @@ def test_join_2d(dtype) -> None:
     assert_identical(res_space_y, targ_space_y)
 
     with pytest.raises(
-        ValueError, match="Dimension must be specified for multidimensional arrays."
+        ValueError, match=r"Dimension must be specified for multidimensional arrays."
     ):
         values.str.join()
 
diff -pruN 2025.03.1-8/xarray/tests/test_assertions.py 2025.10.1-1/xarray/tests/test_assertions.py
--- 2025.03.1-8/xarray/tests/test_assertions.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_assertions.py	2025-10-10 10:38:05.000000000 +0000
@@ -57,6 +57,11 @@ def test_allclose_regression() -> None:
             xr.DataArray(np.array("b", dtype="|S1")),
             id="DataArray_with_character_dtype",
         ),
+        pytest.param(
+            xr.Coordinates({"x": [1e-17, 2]}),
+            xr.Coordinates({"x": [0, 3]}),
+            id="Coordinates",
+        ),
     ),
 )
 def test_assert_allclose(obj1, obj2) -> None:
@@ -83,6 +88,19 @@ def test_assert_allclose_equal_transpose
     getattr(xr.testing, func)(ds1, ds2, check_dim_order=False)
 
 
+def test_assert_equal_transpose_datatree() -> None:
+    """Ensure `check_dim_order=False` works for transposed DataTree"""
+    ds = xr.Dataset(data_vars={"data": (("x", "y"), [[1, 2]])})
+
+    a = xr.DataTree.from_dict({"node": ds})
+    b = xr.DataTree.from_dict({"node": ds.transpose("y", "x")})
+
+    with pytest.raises(AssertionError):
+        xr.testing.assert_equal(a, b)
+
+    xr.testing.assert_equal(a, b, check_dim_order=False)
+
+
 @pytest.mark.filterwarnings("error")
 @pytest.mark.parametrize(
     "duckarray",
@@ -174,7 +192,11 @@ def test_ensure_warnings_not_elevated(fu
             return super().dims
 
         def __array__(
-            self, dtype: np.typing.DTypeLike = None, /, *, copy: bool | None = None
+            self,
+            dtype: np.typing.DTypeLike | None = None,
+            /,
+            *,
+            copy: bool | None = None,
         ) -> np.ndarray:
             warnings.warn("warning in test", stacklevel=2)
             return super().__array__(dtype, copy=copy)
diff -pruN 2025.03.1-8/xarray/tests/test_backends.py 2025.10.1-1/xarray/tests/test_backends.py
--- 2025.03.1-8/xarray/tests/test_backends.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_backends.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import asyncio
 import contextlib
 import gzip
 import itertools
@@ -16,6 +17,7 @@ import warnings
 from collections import ChainMap
 from collections.abc import Generator, Iterator, Mapping
 from contextlib import ExitStack
+from importlib import import_module
 from io import BytesIO
 from pathlib import Path
 from typing import TYPE_CHECKING, Any, Final, Literal, cast
@@ -28,12 +30,15 @@ from packaging.version import Version
 from pandas.errors import OutOfBoundsDatetime
 
 import xarray as xr
+import xarray.testing as xrt
 from xarray import (
     DataArray,
     Dataset,
+    DataTree,
     backends,
     load_dataarray,
     load_dataset,
+    load_datatree,
     open_dataarray,
     open_dataset,
     open_mfdataset,
@@ -55,9 +60,12 @@ from xarray.coding.strings import check_
 from xarray.coding.variables import SerializationWarning
 from xarray.conventions import encode_dataset_coordinates
 from xarray.core import indexing
+from xarray.core.indexes import PandasIndex
 from xarray.core.options import set_options
+from xarray.core.types import PDDatetimeUnitOptions
 from xarray.core.utils import module_available
 from xarray.namedarray.pycompat import array_type
+from xarray.structure.alignment import AlignmentError
 from xarray.tests import (
     assert_allclose,
     assert_array_equal,
@@ -71,13 +79,17 @@ from xarray.tests import (
     has_scipy,
     has_zarr,
     has_zarr_v3,
+    has_zarr_v3_async_oindex,
+    has_zarr_v3_dtypes,
     mock,
     network,
+    parametrize_zarr_format,
     requires_cftime,
     requires_dask,
     requires_fsspec,
     requires_h5netcdf,
     requires_h5netcdf_1_4_0_or_above,
+    requires_h5netcdf_or_netCDF4,
     requires_h5netcdf_ros3,
     requires_iris,
     requires_netcdf,
@@ -88,6 +100,7 @@ from xarray.tests import (
     requires_scipy,
     requires_scipy_or_netCDF4,
     requires_zarr,
+    requires_zarr_v3,
 )
 from xarray.tests.test_coding_times import (
     _ALL_CALENDARS,
@@ -100,17 +113,15 @@ from xarray.tests.test_dataset import (
     create_test_data,
 )
 
-try:
+with contextlib.suppress(ImportError):
     import netCDF4 as nc4
-except ImportError:
-    pass
 
-try:
+with contextlib.suppress(ImportError):
     import dask
     import dask.array as da
-except ImportError:
-    pass
 
+with contextlib.suppress(ImportError):
+    import fsspec
 
 if has_zarr:
     import zarr
@@ -118,6 +129,7 @@ if has_zarr:
 
     if has_zarr_v3:
         from zarr.storage import MemoryStore as KVStore
+        from zarr.storage import WrapperStore
 
         ZARR_FORMATS = [2, 3]
     else:
@@ -128,8 +140,11 @@ if has_zarr:
             )
         except ImportError:
             KVStore = None  # type: ignore[assignment,misc,unused-ignore]
+
+        WrapperStore = object  # type: ignore[assignment,misc,unused-ignore]
 else:
     KVStore = None  # type: ignore[assignment,misc,unused-ignore]
+    WrapperStore = object  # type: ignore[assignment,misc,unused-ignore]
     ZARR_FORMATS = []
 
 
@@ -154,6 +169,69 @@ def skip_if_zarr_format_2(reason: str):
 
 ON_WINDOWS = sys.platform == "win32"
 default_value = object()
+
+
+def _check_compression_codec_available(codec: str | None) -> bool:
+    """Check if a compression codec is available in the netCDF4 library.
+
+    Parameters
+    ----------
+    codec : str or None
+        The compression codec name (e.g., 'zstd', 'blosc_lz', etc.)
+
+    Returns
+    -------
+    bool
+        True if the codec is available, False otherwise.
+    """
+    if codec is None or codec in ("zlib", "szip"):
+        # These are standard and should be available
+        return True
+
+    if not has_netCDF4:
+        return False
+
+    try:
+        import os
+
+        import netCDF4
+
+        # Try to create a file with the compression to test availability
+        with tempfile.NamedTemporaryFile(suffix=".nc", delete=False) as tmp:
+            tmp_path = tmp.name
+
+        try:
+            nc = netCDF4.Dataset(tmp_path, "w", format="NETCDF4")
+            nc.createDimension("x", 10)
+
+            # Attempt to create a variable with the compression
+            if codec and codec.startswith("blosc"):
+                nc.createVariable(  # type: ignore[call-overload]
+                    varname="test",
+                    datatype="f4",
+                    dimensions=("x",),
+                    compression=codec,
+                    blosc_shuffle=1,
+                )
+            else:
+                nc.createVariable(  # type: ignore[call-overload]
+                    varname="test", datatype="f4", dimensions=("x",), compression=codec
+                )
+
+            nc.close()
+            os.unlink(tmp_path)
+            return True
+        except (RuntimeError, netCDF4.NetCDF4MissingFeatureException):
+            # Codec not available
+            if os.path.exists(tmp_path):
+                with contextlib.suppress(OSError):
+                    os.unlink(tmp_path)
+            return False
+    except Exception:
+        # Any other error, assume codec is not available
+        return False
+
+
 dask_array_type = array_type("dask")
 
 if TYPE_CHECKING:
@@ -302,7 +380,15 @@ def create_encoded_unsigned_false_masked
 
 def create_boolean_data() -> Dataset:
     attributes = {"units": "-"}
-    return Dataset({"x": ("t", [True, False, False, True], attributes)})
+    return Dataset(
+        {
+            "x": (
+                ("t", "x"),
+                [[False, True, False, True], [True, False, False, True]],
+                attributes,
+            )
+        }
+    )
 
 
 class TestCommon:
@@ -333,6 +419,11 @@ class TestCommon:
 class NetCDF3Only:
     netcdf3_formats: tuple[T_NetcdfTypes, ...] = ("NETCDF3_CLASSIC", "NETCDF3_64BIT")
 
+    @pytest.mark.asyncio
+    @pytest.mark.skip(reason="NetCDF backends don't support async loading")
+    async def test_load_async(self) -> None:
+        pass
+
     @requires_scipy
     def test_dtype_coercion_error(self) -> None:
         """Failing dtype coercion should lead to an error"""
@@ -443,6 +534,7 @@ class DatasetIOBase:
             assert_identical(expected, actual)
 
     def test_load(self) -> None:
+        # Note: please keep this in sync with test_load_async below as much as possible!
         expected = create_test_data()
 
         @contextlib.contextmanager
@@ -475,6 +567,43 @@ class DatasetIOBase:
             actual = ds.load()
         assert_identical(expected, actual)
 
+    @pytest.mark.asyncio
+    async def test_load_async(self) -> None:
+        # Note: please keep this in sync with test_load above as much as possible!
+
+        # Copied from `test_load` on the base test class, but won't work for netcdf
+        expected = create_test_data()
+
+        @contextlib.contextmanager
+        def assert_loads(vars=None):
+            if vars is None:
+                vars = expected
+            with self.roundtrip(expected) as actual:
+                for k, v in actual.variables.items():
+                    # IndexVariables are eagerly loaded into memory
+                    assert v._in_memory == (k in actual.dims)
+                yield actual
+                for k, v in actual.variables.items():
+                    if k in vars:
+                        assert v._in_memory
+                assert_identical(expected, actual)
+
+        with pytest.raises(AssertionError):
+            # make sure the contextmanager works!
+            with assert_loads() as ds:
+                pass
+
+        with assert_loads() as ds:
+            await ds.load_async()
+
+        with assert_loads(["var1", "dim1", "dim2"]) as ds:
+            await ds["var1"].load_async()
+
+        # verify we can read data even after closing the file
+        with self.roundtrip(expected) as ds:
+            actual = await ds.load_async()
+        assert_identical(expected, actual)
+
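
As a user-level sketch of the API exercised by `test_load_async` above (the store path is illustrative, and per the skips elsewhere in this file only the zarr-python 3 backend supports it), asynchronous loading composes with `asyncio` directly:

import asyncio

import xarray as xr


async def main() -> None:
    ds = xr.open_zarr("example.zarr", chunks=None)  # lazy backend arrays, no dask
    loaded = await ds.load_async()                  # variables are fetched concurrently
    print(loaded)


asyncio.run(main())
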
     def test_dataset_compute(self) -> None:
         expected = create_test_data()
 
@@ -497,22 +626,17 @@ class DatasetIOBase:
     def test_pickle(self) -> None:
         expected = Dataset({"foo": ("x", [42])})
         with self.roundtrip(expected, allow_cleanup_failure=ON_WINDOWS) as roundtripped:
-            with roundtripped:
-                # Windows doesn't like reopening an already open file
-                raw_pickle = pickle.dumps(roundtripped)
+            # Windows doesn't like reopening an already open file
+            raw_pickle = pickle.dumps(roundtripped)
             with pickle.loads(raw_pickle) as unpickled_ds:
                 assert_identical(expected, unpickled_ds)
 
-    @pytest.mark.filterwarnings("ignore:deallocating CachingFileManager")
     def test_pickle_dataarray(self) -> None:
         expected = Dataset({"foo": ("x", [42])})
         with self.roundtrip(expected, allow_cleanup_failure=ON_WINDOWS) as roundtripped:
-            with roundtripped:
-                raw_pickle = pickle.dumps(roundtripped["foo"])
-            # TODO: figure out how to explicitly close the file for the
-            # unpickled DataArray?
-            unpickled = pickle.loads(raw_pickle)
-            assert_identical(expected["foo"], unpickled)
+            raw_pickle = pickle.dumps(roundtripped["foo"])
+            with pickle.loads(raw_pickle) as unpickled:
+                assert_identical(expected["foo"], unpickled)
 
     def test_dataset_caching(self) -> None:
         expected = Dataset({"foo": ("x", [5, 6, 7])})
@@ -528,7 +652,6 @@ class DatasetIOBase:
             _ = actual.foo.values  # no caching
             assert not actual.foo.variable._in_memory
 
-    @pytest.mark.filterwarnings("ignore:deallocating CachingFileManager")
     def test_roundtrip_None_variable(self) -> None:
         expected = Dataset({None: (("x", "y"), [[0, 1], [2, 3]])})
         with self.roundtrip(expected) as actual:
@@ -635,12 +758,25 @@ class DatasetIOBase:
         #  though we cannot test that until we fix the timedelta decoding
         #  to support large ranges
         time_deltas = pd.to_timedelta(["1h", "2h", "NaT"]).as_unit("s")  # type: ignore[arg-type, unused-ignore]
+        encoding = {"units": "seconds"}
         expected = Dataset({"td": ("td", time_deltas), "td0": time_deltas[0]})
+        expected["td"].encoding = encoding
+        expected["td0"].encoding = encoding
         with self.roundtrip(
             expected, open_kwargs={"decode_timedelta": CFTimedeltaCoder(time_unit="ns")}
         ) as actual:
             assert_identical(expected, actual)
 
+    def test_roundtrip_timedelta_data_via_dtype(
+        self, time_unit: PDDatetimeUnitOptions
+    ) -> None:
+        time_deltas = pd.to_timedelta(["1h", "2h", "NaT"]).as_unit(time_unit)  # type: ignore[arg-type, unused-ignore]
+        expected = Dataset(
+            {"td": ("td", time_deltas), "td0": time_deltas[0].to_numpy()}
+        )
+        with self.roundtrip(expected) as actual:
+            assert_identical(expected, actual)
+
     def test_roundtrip_float64_data(self) -> None:
         expected = Dataset({"x": ("y", np.array([1.0, 2.0, np.pi], dtype="float64"))})
         with self.roundtrip(expected) as actual:
@@ -707,6 +843,9 @@ class DatasetIOBase:
             with self.roundtrip(actual) as actual2:
                 assert_identical(original, actual2)
                 assert actual2["x"].dtype == "bool"
+            with self.roundtrip(actual) as actual3:
+                # GH10536
+                assert_identical(original.transpose(), actual3.transpose())
 
     def test_orthogonal_indexing(self) -> None:
         in_memory = create_test_data()
@@ -850,15 +989,14 @@ class DatasetIOBase:
             if hasattr(obj, "array"):
                 if isinstance(obj.array, indexing.ExplicitlyIndexed):
                     find_and_validate_array(obj.array)
+                elif isinstance(obj.array, np.ndarray):
+                    assert isinstance(obj, indexing.NumpyIndexingAdapter)
+                elif isinstance(obj.array, dask_array_type):
+                    assert isinstance(obj, indexing.DaskIndexingAdapter)
+                elif isinstance(obj.array, pd.Index):
+                    assert isinstance(obj, indexing.PandasIndexingAdapter)
                 else:
-                    if isinstance(obj.array, np.ndarray):
-                        assert isinstance(obj, indexing.NumpyIndexingAdapter)
-                    elif isinstance(obj.array, dask_array_type):
-                        assert isinstance(obj, indexing.DaskIndexingAdapter)
-                    elif isinstance(obj.array, pd.Index):
-                        assert isinstance(obj, indexing.PandasIndexingAdapter)
-                    else:
-                        raise TypeError(f"{type(obj.array)} is wrapped by {type(obj)}")
+                    raise TypeError(f"{type(obj.array)} is wrapped by {type(obj)}")
 
         for v in ds.variables.values():
             find_and_validate_array(v._data)
@@ -1197,7 +1335,7 @@ class CFEncodedBase(DatasetIOBase):
 
     def test_coordinates_encoding(self) -> None:
         def equals_latlon(obj):
-            return obj == "lat lon" or obj == "lon lat"
+            return obj in {"lat lon", "lon lat"}
 
         original = Dataset(
             {"temp": ("x", [0, 1]), "precip": ("x", [0, -1])},
@@ -1294,6 +1432,47 @@ class CFEncodedBase(DatasetIOBase):
             with self.roundtrip(ds, save_kwargs=kwargs) as actual:
                 pass
 
+    def test_encoding_unlimited_dims(self) -> None:
+        if isinstance(self, ZarrBase):
+            pytest.skip("No unlimited_dims handled in zarr.")
+        ds = Dataset({"x": ("y", np.arange(10.0))})
+        with self.roundtrip(ds, save_kwargs=dict(unlimited_dims=["y"])) as actual:
+            assert actual.encoding["unlimited_dims"] == set("y")
+            assert_equal(ds, actual)
+
+        # Regression test for https://github.com/pydata/xarray/issues/2134
+        with self.roundtrip(ds, save_kwargs=dict(unlimited_dims="y")) as actual:
+            assert actual.encoding["unlimited_dims"] == set("y")
+            assert_equal(ds, actual)
+
+        ds.encoding = {"unlimited_dims": ["y"]}
+        with self.roundtrip(ds) as actual:
+            assert actual.encoding["unlimited_dims"] == set("y")
+            assert_equal(ds, actual)
+
+        # Regression test for https://github.com/pydata/xarray/issues/2134
+        ds.encoding = {"unlimited_dims": "y"}
+        with self.roundtrip(ds) as actual:
+            assert actual.encoding["unlimited_dims"] == set("y")
+            assert_equal(ds, actual)
+
+        # test unlimited_dims validation
+        # https://github.com/pydata/xarray/issues/10549
+        ds.encoding = {"unlimited_dims": "z"}
+        with pytest.warns(
+            UserWarning,
+            match=r"Unlimited dimension\(s\) .* declared in 'dataset.encoding'",
+        ):
+            with self.roundtrip(ds) as _:
+                pass
+        ds.encoding = {}
+        with pytest.raises(
+            ValueError,
+            match=r"Unlimited dimension\(s\) .* declared in 'unlimited_dims-kwarg'",
+        ):
+            with self.roundtrip(ds, save_kwargs=dict(unlimited_dims=["z"])) as _:
+                pass
+
     def test_encoding_kwarg_dates(self) -> None:
         ds = Dataset({"t": pd.date_range("2000-01-01", periods=3)})
         units = "days since 1900-01-01"
@@ -1427,10 +1606,34 @@ class CFEncodedBase(DatasetIOBase):
             with self.roundtrip(original) as actual:
                 assert_identical(original, actual)
 
+    @pytest.mark.parametrize(
+        "indexer",
+        (
+            {"y": [1]},
+            {"y": slice(2)},
+            {"y": 1},
+            {"x": [1], "y": [1]},
+            {"x": ("x0", [0, 1]), "y": ("x0", [0, 1])},
+        ),
+    )
+    def test_indexing_roundtrip(self, indexer) -> None:
+        # regression test for GH8909
+        ds = xr.Dataset()
+        ds["A"] = xr.DataArray([[1, "a"], [2, "b"]], dims=["x", "y"])
+        with self.roundtrip(ds) as ds2:
+            expected = ds2.sel(indexer)
+            with self.roundtrip(expected) as actual:
+                assert_identical(actual, expected)
+
 
 class NetCDFBase(CFEncodedBase):
     """Tests for all netCDF3 and netCDF4 backends."""
 
+    @pytest.mark.asyncio
+    @pytest.mark.skip(reason="NetCDF backends don't support async loading")
+    async def test_load_async(self) -> None:
+        await super().test_load_async()
+
     @pytest.mark.skipif(
         ON_WINDOWS, reason="Windows does not allow modifying open files"
     )
@@ -1561,6 +1764,17 @@ class NetCDF4Base(NetCDFBase):
             with self.open(tmp_file, group="data/2") as actual2:
                 assert_identical(data2, actual2)
 
+    def test_child_group_with_inconsistent_dimensions(self) -> None:
+        base = Dataset(coords={"x": [1, 2]})
+        child = Dataset(coords={"x": [1, 2, 3]})
+        with create_tmp_file() as tmp_file:
+            self.save(base, tmp_file)
+            self.save(child, tmp_file, group="child", mode="a")
+            with self.open(tmp_file) as actual_base:
+                assert_identical(base, actual_base)
+            with self.open(tmp_file, group="child") as actual_child:
+                assert_identical(child, actual_child)
+
     @pytest.mark.parametrize(
         "input_strings, is_bytes",
         [
@@ -1745,7 +1959,7 @@ class NetCDF4Base(NetCDFBase):
                 (1, y_chunksize, x_chunksize),
                 open_kwargs={"chunks": "auto"},
             ) as ds:
-                t_chunks, y_chunks, x_chunks = ds["image"].data.chunks
+                _t_chunks, y_chunks, x_chunks = ds["image"].data.chunks
                 assert all(np.asanyarray(y_chunks) == y_chunksize)
                 # Check that the chunk size is a multiple of the file chunk size
                 assert all(np.asanyarray(x_chunks) % x_chunksize == 0)
@@ -1869,16 +2083,6 @@ class NetCDF4Base(NetCDFBase):
                 with open_dataset(tmp_file, **cast(dict, kwargs)) as actual:
                     assert_identical(expected, actual)
 
-    def test_encoding_unlimited_dims(self) -> None:
-        ds = Dataset({"x": ("y", np.arange(10.0))})
-        with self.roundtrip(ds, save_kwargs=dict(unlimited_dims=["y"])) as actual:
-            assert actual.encoding["unlimited_dims"] == set("y")
-            assert_equal(ds, actual)
-        ds.encoding = {"unlimited_dims": ["y"]}
-        with self.roundtrip(ds) as actual:
-            assert actual.encoding["unlimited_dims"] == set("y")
-            assert_equal(ds, actual)
-
     def test_raise_on_forward_slashes_in_names(self) -> None:
         # test for forward slash in variable names and dimensions
         # see GH 7943
@@ -1907,7 +2111,7 @@ class NetCDF4Base(NetCDFBase):
                     fill_value=None,
                 )
                 v[:] = 1
-            with open_dataset(tmp_file) as original:
+            with open_dataset(tmp_file, engine="netcdf4") as original:
                 save_kwargs = {}
                 # We don't expect any errors.
                 # This is effectively a void context manager
@@ -1959,7 +2163,7 @@ class NetCDF4Base(NetCDFBase):
                     "time",
                     fill_value=255,
                 )
-            with open_dataset(tmp_file) as original:
+            with open_dataset(tmp_file, engine="netcdf4") as original:
                 save_kwargs = {}
                 if self.engine == "h5netcdf" and not has_h5netcdf_1_4_0_or_above:
                     save_kwargs["invalid_netcdf"] = True
@@ -2008,7 +2212,7 @@ class NetCDF4Base(NetCDFBase):
                     "time",
                     fill_value=255,
                 )
-            with open_dataset(tmp_file) as original:
+            with open_dataset(tmp_file, engine="netcdf4") as original:
                 assert (
                     original.clouds.encoding["dtype"].metadata
                     == original.tifa.encoding["dtype"].metadata
@@ -2024,13 +2228,33 @@ class NetCDF4Base(NetCDFBase):
                     with pytest.raises(
                         ValueError,
                         match=(
-                            "Cannot save variable .*"
-                            " because an enum `cloud_type` already exists in the Dataset .*"
+                            r"Cannot save variable .*"
+                            r" because an enum `cloud_type` already exists in the Dataset .*"
                         ),
                     ):
                         with self.roundtrip(original):
                             pass
 
+    @pytest.mark.parametrize("create_default_indexes", [True, False])
+    def test_create_default_indexes(self, tmp_path, create_default_indexes) -> None:
+        store_path = tmp_path / "tmp.nc"
+        original_ds = xr.Dataset(
+            {"data": ("x", np.arange(3))}, coords={"x": [-1, 0, 1]}
+        )
+        original_ds.to_netcdf(store_path, engine=self.engine, mode="w")
+
+        with open_dataset(
+            store_path,
+            engine=self.engine,
+            create_default_indexes=create_default_indexes,
+        ) as loaded_ds:
+            if create_default_indexes:
+                assert list(loaded_ds.xindexes) == ["x"] and isinstance(
+                    loaded_ds.xindexes["x"], PandasIndex
+                )
+            else:
+                assert len(loaded_ds.xindexes) == 0
+
 
 @requires_netCDF4
 class TestNetCDF4Data(NetCDF4Base):
@@ -2097,12 +2321,48 @@ class TestNetCDF4Data(NetCDF4Base):
             None,
             "zlib",
             "szip",
-            "zstd",
-            "blosc_lz",
-            "blosc_lz4",
-            "blosc_lz4hc",
-            "blosc_zlib",
-            "blosc_zstd",
+            pytest.param(
+                "zstd",
+                marks=pytest.mark.xfail(
+                    not _check_compression_codec_available("zstd"),
+                    reason="zstd codec not available in netCDF4 installation",
+                ),
+            ),
+            pytest.param(
+                "blosc_lz",
+                marks=pytest.mark.xfail(
+                    not _check_compression_codec_available("blosc_lz"),
+                    reason="blosc_lz codec not available in netCDF4 installation",
+                ),
+            ),
+            pytest.param(
+                "blosc_lz4",
+                marks=pytest.mark.xfail(
+                    not _check_compression_codec_available("blosc_lz4"),
+                    reason="blosc_lz4 codec not available in netCDF4 installation",
+                ),
+            ),
+            pytest.param(
+                "blosc_lz4hc",
+                marks=pytest.mark.xfail(
+                    not _check_compression_codec_available("blosc_lz4hc"),
+                    reason="blosc_lz4hc codec not available in netCDF4 installation",
+                ),
+            ),
+            pytest.param(
+                "blosc_zlib",
+                marks=pytest.mark.xfail(
+                    not _check_compression_codec_available("blosc_zlib"),
+                    reason="blosc_zlib codec not available in netCDF4 installation",
+                ),
+            ),
+            pytest.param(
+                "blosc_zstd",
+                marks=pytest.mark.xfail(
+                    not _check_compression_codec_available("blosc_zstd"),
+                    reason="blosc_zstd codec not available in netCDF4 installation",
+                ),
+            ),
         ],
     )
     @requires_netCDF4_1_6_2_or_above
@@ -2227,6 +2487,70 @@ class TestNetCDF4AlreadyOpen:
                 assert_identical(expected, copied)
 
 
+class InMemoryNetCDF:
+    engine: T_NetcdfEngine | None
+
+    def test_roundtrip_via_memoryview(self) -> None:
+        original = create_test_data()
+        result = original.to_netcdf(engine=self.engine)
+        roundtrip = load_dataset(result, engine=self.engine)
+        assert_identical(roundtrip, original)
+
+    def test_roundtrip_via_bytes(self) -> None:
+        original = create_test_data()
+        result = bytes(original.to_netcdf(engine=self.engine))
+        roundtrip = load_dataset(result, engine=self.engine)
+        assert_identical(roundtrip, original)
+
+    def test_pickle_open_dataset_from_bytes(self) -> None:
+        original = Dataset({"foo": ("x", [1, 2, 3])})
+        netcdf_bytes = bytes(original.to_netcdf(engine=self.engine))
+        with open_dataset(netcdf_bytes, engine=self.engine) as roundtrip:
+            with pickle.loads(pickle.dumps(roundtrip)) as unpickled:
+                assert_identical(unpickled, original)
+
+    def test_compute_false(self) -> None:
+        original = create_test_data()
+        with pytest.raises(
+            NotImplementedError,
+            match=re.escape("to_netcdf() with compute=False is not yet implemented"),
+        ):
+            original.to_netcdf(engine=self.engine, compute=False)
+
+
+class InMemoryNetCDFWithGroups(InMemoryNetCDF):
+    def test_roundtrip_group_via_memoryview(self) -> None:
+        original = create_test_data()
+        netcdf_bytes = original.to_netcdf(group="sub", engine=self.engine)
+        roundtrip = load_dataset(netcdf_bytes, group="sub", engine=self.engine)
+        assert_identical(roundtrip, original)
+
+
+class FileObjectNetCDF:
+    engine: T_NetcdfEngine
+
+    def test_file_remains_open(self) -> None:
+        data = Dataset({"foo": ("x", [1, 2, 3])})
+        f = BytesIO()
+        data.to_netcdf(f, engine=self.engine)
+        assert not f.closed
+        restored = open_dataset(f, engine=self.engine)
+        assert not f.closed
+        assert_identical(restored, data)
+        restored.close()
+        assert not f.closed
+
+
+@requires_h5netcdf_or_netCDF4
+class TestGenericNetCDF4InMemory(InMemoryNetCDFWithGroups):
+    engine = None
+
+
+@requires_netCDF4
+class TestNetCDF4InMemory(InMemoryNetCDFWithGroups):
+    engine: T_NetcdfEngine = "netcdf4"
+
+
 @requires_netCDF4
 @requires_dask
 @pytest.mark.filterwarnings("ignore:deallocating CachingFileManager")
@@ -2276,6 +2600,36 @@ class TestNetCDF4ViaDaskData(TestNetCDF4
     def test_roundtrip_coordinates(self) -> None:
         super().test_roundtrip_coordinates()
 
+    @requires_cftime
+    def test_roundtrip_cftime_bnds(self):
+        # Regression test for issue #7794
+        import cftime
+
+        original = xr.Dataset(
+            {
+                "foo": ("time", [0.0]),
+                "time_bnds": (
+                    ("time", "bnds"),
+                    [
+                        [
+                            cftime.Datetime360Day(2005, 12, 1, 0, 0, 0, 0),
+                            cftime.Datetime360Day(2005, 12, 2, 0, 0, 0, 0),
+                        ]
+                    ],
+                ),
+            },
+            {"time": [cftime.Datetime360Day(2005, 12, 1, 12, 0, 0, 0)]},
+        )
+
+        with create_tmp_file() as tmp_file:
+            original.to_netcdf(tmp_file)
+            with open_dataset(tmp_file) as actual:
+                # Operation to load actual time_bnds into memory
+                assert_array_equal(actual.time_bnds.values, original.time_bnds.values)
+                chunked = actual.chunk(time=1)
+                with create_tmp_file() as tmp_file_chunked:
+                    chunked.to_netcdf(tmp_file_chunked)
+
 
 @requires_zarr
 @pytest.mark.usefixtures("default_zarr_format")
@@ -2320,6 +2674,17 @@ class ZarrBase(CFEncodedBase):
             with self.open(store_target, **open_kwargs) as ds:
                 yield ds
 
+    @pytest.mark.asyncio
+    @pytest.mark.skipif(
+        not has_zarr_v3,
+        reason="zarr-python <3 did not support async loading",
+    )
+    async def test_load_async(self) -> None:
+        await super().test_load_async()
+
+    def test_roundtrip_bytes_with_fill_value(self):
+        pytest.xfail("Broken by Zarr 3.0.7")
+
     @pytest.mark.parametrize("consolidated", [False, True, None])
     def test_roundtrip_consolidated(self, consolidated) -> None:
         expected = create_test_data()
@@ -2337,17 +2702,22 @@ class ZarrBase(CFEncodedBase):
             self.save(
                 expected, store_target=store, consolidated=False, **self.version_kwargs
             )
-            with pytest.warns(
-                RuntimeWarning,
-                match="Failed to open Zarr store with consolidated",
-            ):
-                with xr.open_zarr(store, **self.version_kwargs) as ds:
-                    assert_identical(ds, expected)
+            if getattr(store, "supports_consolidated_metadata", True):
+                with pytest.warns(
+                    RuntimeWarning,
+                    match="Failed to open Zarr store with consolidated",
+                ):
+                    with xr.open_zarr(store, **self.version_kwargs) as ds:
+                        assert_identical(ds, expected)
 
     def test_non_existent_store(self) -> None:
-        with pytest.raises(
-            FileNotFoundError, match="(No such file or directory|Unable to find group)"
-        ):
+        patterns = [
+            "No such file or directory",
+            "Unable to find group",
+            "No group found in store",
+            "does not exist",
+        ]
+        with pytest.raises(FileNotFoundError, match=f"({'|'.join(patterns)})"):
             xr.open_zarr(f"{uuid.uuid4()}")
 
     @pytest.mark.skipif(has_zarr_v3, reason="chunk_store not implemented in zarr v3")
@@ -2427,7 +2797,14 @@ class ZarrBase(CFEncodedBase):
                 assert_identical(actual, auto)
                 assert_identical(actual.load(), auto.load())
 
+    def test_unlimited_dims_encoding_is_ignored(self) -> None:
+        ds = Dataset({"x": np.arange(10)})
+        ds.encoding = {"unlimited_dims": ["x"]}
+        with self.roundtrip(ds) as actual:
+            assert_identical(ds, actual)
+
     @requires_dask
+    @pytest.mark.filterwarnings("ignore:.*does not have a Zarr V3 specification.*")
     def test_warning_on_bad_chunks(self) -> None:
         original = create_test_data().chunk({"dim1": 4, "dim2": 3, "dim3": 3})
 
@@ -2579,7 +2956,7 @@ class ZarrBase(CFEncodedBase):
         # but intermediate unaligned chunks are bad
         badenc = ds.chunk({"x": (3, 5, 3, 1)})
         badenc.var1.encoding["chunks"] = (3,)
-        with pytest.raises(ValueError, match=r"would overlap multiple dask chunks"):
+        with pytest.raises(ValueError, match=r"would overlap multiple Dask chunks"):
             with self.roundtrip(badenc) as actual:
                 pass
 
@@ -2836,7 +3213,9 @@ class ZarrBase(CFEncodedBase):
 
     @pytest.mark.parametrize("dtype", ["U", "S"])
     def test_append_string_length_mismatch_raises(self, dtype) -> None:
-        skip_if_zarr_format_3("This actually works fine with Zarr format 3")
+        if has_zarr_v3 and not has_zarr_v3_dtypes:
+            skip_if_zarr_format_3("This actually works fine with Zarr format 3")
+
         ds, ds_to_append = create_append_string_length_mismatch_test_data(dtype)
         with self.create_zarr_target() as store_target:
             ds.to_zarr(store_target, mode="w", **self.version_kwargs)
@@ -2849,8 +3228,12 @@ class ZarrBase(CFEncodedBase):
     def test_append_string_length_mismatch_works(self, dtype) -> None:
         skip_if_zarr_format_2("This doesn't work with Zarr format 2")
         # ...but it probably would if we used object dtype
+        if has_zarr_v3_dtypes:
+            pytest.skip("This works on pre ZDtype Zarr-Python, but fails after.")
+
         ds, ds_to_append = create_append_string_length_mismatch_test_data(dtype)
         expected = xr.concat([ds, ds_to_append], dim="time")
+
         with self.create_zarr_target() as store_target:
             ds.to_zarr(store_target, mode="w", **self.version_kwargs)
             ds_to_append.to_zarr(store_target, append_dim="time", **self.version_kwargs)
@@ -3060,6 +3443,14 @@ class ZarrBase(CFEncodedBase):
             ) as actual:
                 assert_identical(actual, nonzeros)
 
+    def test_region_scalar(self) -> None:
+        ds = Dataset({"x": 0})
+        with self.create_zarr_target() as store:
+            ds.to_zarr(store)
+            ds.to_zarr(store, region={}, mode="r+")
+            with xr.open_zarr(store) as actual:
+                assert_identical(actual, ds)
+
     @pytest.mark.parametrize("mode", [None, "r+", "a"])
     def test_write_region_mode(self, mode) -> None:
         zeros = Dataset({"u": (("x",), np.zeros(10))})
@@ -3654,6 +4045,308 @@ class TestZarrDictStore(ZarrBase):
         else:
             yield {}
 
+    def test_chunk_key_encoding_v2(self) -> None:
+        encoding = {"name": "v2", "configuration": {"separator": "/"}}
+
+        # Create a dataset with a variable name containing a period
+        data = np.ones((4, 4))
+        original = Dataset({"var1": (("x", "y"), data)})
+
+        # Set up chunk key encoding with slash separator
+        encoding = {
+            "var1": {
+                "chunk_key_encoding": encoding,
+                "chunks": (2, 2),
+            }
+        }
+
+        # Write to store with custom encoding
+        with self.create_zarr_target() as store:
+            original.to_zarr(store, encoding=encoding)
+
+            # Verify the chunk keys in store use the slash separator
+            if not has_zarr_v3:
+                chunk_keys = [k for k in store.keys() if k.startswith("var1/")]
+                assert len(chunk_keys) > 0
+                for key in chunk_keys:
+                    assert "/" in key
+                    assert "." not in key.split("/")[1:]  # No dots in chunk coordinates
+
+            # Read back and verify data
+            with xr.open_zarr(store) as actual:
+                assert_identical(original, actual)
+                # Verify chunks are preserved
+                assert actual["var1"].encoding["chunks"] == (2, 2)
+
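For reference, a minimal user-facing sketch of the chunk-key encoding exercised by test_chunk_key_encoding_v2; the store path is hypothetical and only the encoding entries come from the test:

    import numpy as np
    import xarray as xr

    ds = xr.Dataset({"var1": (("x", "y"), np.ones((4, 4)))})
    ds.to_zarr(
        "example_keys.zarr",  # hypothetical path
        encoding={
            "var1": {
                # with the v2 key layout, chunk keys come out as "var1/0/0"
                # rather than the default "var1/0.0"
                "chunk_key_encoding": {"name": "v2", "configuration": {"separator": "/"}},
                "chunks": (2, 2),
            }
        },
    )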
+    @pytest.mark.asyncio
+    @requires_zarr_v3
+    async def test_async_load_multiple_variables(self) -> None:
+        target_class = zarr.AsyncArray
+        method_name = "getitem"
+        original_method = getattr(target_class, method_name)
+
+        # the indexed coordinate variable is not lazy, so create_test_data() has 4 lazy variables in total
+        N_LAZY_VARS = 4
+
+        original = create_test_data()
+        with self.create_zarr_target() as store:
+            original.to_zarr(store, zarr_format=3, consolidated=False)
+
+            with patch.object(
+                target_class, method_name, wraps=original_method, autospec=True
+            ) as mocked_meth:
+                # blocks upon loading the coordinate variables here
+                ds = xr.open_zarr(store, consolidated=False, chunks=None)
+
+                # TODO we're not actually testing that these indexing methods are not blocking...
+                result_ds = await ds.load_async()
+
+                mocked_meth.assert_called()
+                assert mocked_meth.call_count == N_LAZY_VARS
+                mocked_meth.assert_awaited()
+
+            xrt.assert_identical(result_ds, ds.load())
+
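A minimal sketch of the async loading pattern these tests cover, assuming a Zarr v3 store at a hypothetical path written without consolidated metadata, as above:

    import asyncio

    import xarray as xr

    async def load_all() -> xr.Dataset:
        # chunks=None keeps lazy backend arrays instead of wrapping them in dask
        ds = xr.open_zarr("example.zarr", consolidated=False, chunks=None)
        # fetch all lazy variables concurrently rather than one at a time
        return await ds.load_async()

    loaded = asyncio.run(load_all())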
+    @pytest.mark.asyncio
+    @requires_zarr_v3
+    @pytest.mark.parametrize("cls_name", ["Variable", "DataArray", "Dataset"])
+    async def test_concurrent_load_multiple_objects(
+        self,
+        cls_name,
+    ) -> None:
+        N_OBJECTS = 5
+        N_LAZY_VARS = {
+            "Variable": 1,
+            "DataArray": 1,
+            "Dataset": 4,
+        }  # specific to the create_test_data() used
+
+        target_class = zarr.AsyncArray
+        method_name = "getitem"
+        original_method = getattr(target_class, method_name)
+
+        original = create_test_data()
+        with self.create_zarr_target() as store:
+            original.to_zarr(store, consolidated=False, zarr_format=3)
+
+            with patch.object(
+                target_class, method_name, wraps=original_method, autospec=True
+            ) as mocked_meth:
+                xr_obj = get_xr_obj(store, cls_name)
+
+                # TODO we're not actually testing that these indexing methods are not blocking...
+                coros = [xr_obj.load_async() for _ in range(N_OBJECTS)]
+                results = await asyncio.gather(*coros)
+
+                mocked_meth.assert_called()
+                assert mocked_meth.call_count == N_OBJECTS * N_LAZY_VARS[cls_name]
+                mocked_meth.assert_awaited()
+
+            for result in results:
+                xrt.assert_identical(result, xr_obj.load())
+
+    @pytest.mark.asyncio
+    @requires_zarr_v3
+    @pytest.mark.parametrize("cls_name", ["Variable", "DataArray", "Dataset"])
+    @pytest.mark.parametrize(
+        "indexer, method, target_zarr_class",
+        [
+            pytest.param({}, "sel", "zarr.AsyncArray", id="no-indexing-sel"),
+            pytest.param({}, "isel", "zarr.AsyncArray", id="no-indexing-isel"),
+            pytest.param({"dim2": 1.0}, "sel", "zarr.AsyncArray", id="basic-int-sel"),
+            pytest.param({"dim2": 2}, "isel", "zarr.AsyncArray", id="basic-int-isel"),
+            pytest.param(
+                {"dim2": slice(1.0, 3.0)},
+                "sel",
+                "zarr.AsyncArray",
+                id="basic-slice-sel",
+            ),
+            pytest.param(
+                {"dim2": slice(1, 3)}, "isel", "zarr.AsyncArray", id="basic-slice-isel"
+            ),
+            pytest.param(
+                {"dim2": [1.0, 3.0]},
+                "sel",
+                "zarr.core.indexing.AsyncOIndex",
+                id="outer-sel",
+            ),
+            pytest.param(
+                {"dim2": [1, 3]},
+                "isel",
+                "zarr.core.indexing.AsyncOIndex",
+                id="outer-isel",
+            ),
+            pytest.param(
+                {
+                    "dim1": xr.Variable(data=[2, 3], dims="points"),
+                    "dim2": xr.Variable(data=[1.0, 2.0], dims="points"),
+                },
+                "sel",
+                "zarr.core.indexing.AsyncVIndex",
+                id="vectorized-sel",
+            ),
+            pytest.param(
+                {
+                    "dim1": xr.Variable(data=[2, 3], dims="points"),
+                    "dim2": xr.Variable(data=[1, 3], dims="points"),
+                },
+                "isel",
+                "zarr.core.indexing.AsyncVIndex",
+                id="vectorized-isel",
+            ),
+        ],
+    )
+    async def test_indexing(
+        self,
+        cls_name,
+        method,
+        indexer,
+        target_zarr_class,
+    ) -> None:
+        if not has_zarr_v3_async_oindex and target_zarr_class in (
+            "zarr.core.indexing.AsyncOIndex",
+            "zarr.core.indexing.AsyncVIndex",
+        ):
+            pytest.skip(
+                "current version of zarr does not support orthogonal or vectorized async indexing"
+            )
+
+        if cls_name == "Variable" and method == "sel":
+            pytest.skip("Variable doesn't have a .sel method")
+
+        # Each type of indexing ends up calling a different zarr indexing method
+        # They all use a method named .getitem, but on a different internal zarr class
+        def _resolve_class_from_string(class_path: str) -> type[Any]:
+            """Resolve a string class path like 'zarr.AsyncArray' to the actual class."""
+            module_path, class_name = class_path.rsplit(".", 1)
+            module = import_module(module_path)
+            return getattr(module, class_name)
+
+        target_class = _resolve_class_from_string(target_zarr_class)
+        method_name = "getitem"
+        original_method = getattr(target_class, method_name)
+
+        original = create_test_data()
+        with self.create_zarr_target() as store:
+            original.to_zarr(store, consolidated=False, zarr_format=3)
+
+            with patch.object(
+                target_class, method_name, wraps=original_method, autospec=True
+            ) as mocked_meth:
+                xr_obj = get_xr_obj(store, cls_name)
+
+                # TODO we're not actually testing that these indexing methods are not blocking...
+                result = await getattr(xr_obj, method)(**indexer).load_async()
+
+                mocked_meth.assert_called()
+                mocked_meth.assert_awaited()
+                assert mocked_meth.call_count > 0
+
+            expected = getattr(xr_obj, method)(**indexer).load()
+            xrt.assert_identical(result, expected)
+
+    @pytest.mark.asyncio
+    @pytest.mark.parametrize(
+        ("indexer", "expected_err_msg"),
+        [
+            pytest.param(
+                {"dim2": 2},
+                "basic async indexing",
+                marks=pytest.mark.skipif(
+                    has_zarr_v3,
+                    reason="current version of zarr has basic async indexing",
+                ),
+            ),  # tests basic indexing
+            pytest.param(
+                {"dim2": [1, 3]},
+                "orthogonal async indexing",
+                marks=pytest.mark.skipif(
+                    has_zarr_v3_async_oindex,
+                    reason="current version of zarr has async orthogonal indexing",
+                ),
+            ),  # tests oindexing
+            pytest.param(
+                {
+                    "dim1": xr.Variable(data=[2, 3], dims="points"),
+                    "dim2": xr.Variable(data=[1, 3], dims="points"),
+                },
+                "vectorized async indexing",
+                marks=pytest.mark.skipif(
+                    has_zarr_v3_async_oindex,
+                    reason="current version of zarr has async vectorized indexing",
+                ),
+            ),  # tests vindexing
+        ],
+    )
+    @parametrize_zarr_format
+    async def test_raise_on_older_zarr_version(
+        self,
+        indexer,
+        expected_err_msg,
+        zarr_format,
+    ):
+        """Test that trying to use async load with insufficiently new version of zarr raises a clear error"""
+
+        original = create_test_data()
+        with self.create_zarr_target() as store:
+            original.to_zarr(store, consolidated=False, zarr_format=zarr_format)
+
+            ds = xr.open_zarr(store, consolidated=False, chunks=None)
+            var = ds["var1"].variable
+
+            with pytest.raises(NotImplementedError, match=expected_err_msg):
+                await var.isel(**indexer).load_async()
+
+
+def get_xr_obj(
+    store: zarr.abc.store.Store, cls_name: Literal["Variable", "DataArray", "Dataset"]
+):
+    ds = xr.open_zarr(store, consolidated=False, chunks=None)
+
+    match cls_name:
+        case "Variable":
+            return ds["var1"].variable
+        case "DataArray":
+            return ds["var1"]
+        case "Dataset":
+            return ds
+
+
+class NoConsolidatedMetadataSupportStore(WrapperStore):
+    """
+    Store that explicitly does not support consolidated metadata.
+
+    Useful as a proxy for stores like Icechunk, see https://github.com/zarr-developers/zarr-python/pull/3119.
+    """
+
+    supports_consolidated_metadata = False
+
+    def __init__(
+        self,
+        store,
+        *,
+        read_only: bool = False,
+    ) -> None:
+        self._store = store.with_read_only(read_only=read_only)
+
+    def with_read_only(
+        self, read_only: bool = False
+    ) -> NoConsolidatedMetadataSupportStore:
+        return type(self)(
+            store=self._store,
+            read_only=read_only,
+        )
+
+
+@requires_zarr_v3
+class TestZarrNoConsolidatedMetadataSupport(ZarrBase):
+    @contextlib.contextmanager
+    def create_zarr_target(self):
+        # TODO the zarr version would need to be >3.0.8 for the supports_consolidated_metadata property to have any effect
+        yield NoConsolidatedMetadataSupportStore(
+            zarr.storage.MemoryStore({}, read_only=False)
+        )
+
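A short sketch of how the wrapper above is meant to be exercised; whether consolidation is actually skipped depends on a zarr-python release where supports_consolidated_metadata takes effect (see the TODO in create_zarr_target above):

    import xarray as xr
    import zarr

    store = NoConsolidatedMetadataSupportStore(
        zarr.storage.MemoryStore({}, read_only=False)
    )
    ds = xr.Dataset({"a": ("x", [1, 2, 3])})
    # a store reporting no consolidated-metadata support should be written
    # and read back without consolidation
    ds.to_zarr(store)
    restored = xr.open_zarr(store, consolidated=False)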
 
 @requires_zarr
 @pytest.mark.skipif(
@@ -3695,6 +4388,23 @@ class TestZarrWriteEmpty(TestZarrDirecto
         ) as ds:
             yield ds
 
+    @requires_dask
+    def test_default_zarr_fill_value(self):
+        inputs = xr.Dataset({"floats": ("x", [1.0]), "ints": ("x", [1])}).chunk()
+        expected = xr.Dataset({"floats": ("x", [np.nan]), "ints": ("x", [0])})
+        with self.temp_dir() as (_d, store):
+            inputs.to_zarr(store, compute=False)
+            with open_dataset(store) as on_disk:
+                assert np.isnan(on_disk.variables["floats"].encoding["_FillValue"])
+                assert (
+                    "_FillValue" not in on_disk.variables["ints"].encoding
+                )  # use default
+                if not has_zarr_v3:
+                    # zarr-python v2 interprets fill_value=None inconsistently
+                    del on_disk["ints"]
+                    del expected["ints"]
+                assert_identical(expected, on_disk)
+
     @pytest.mark.parametrize("consolidated", [True, False, None])
     @pytest.mark.parametrize("write_empty", [True, False, None])
     def test_write_empty(
@@ -3714,13 +4424,11 @@ class TestZarrWriteEmpty(TestZarrDirecto
                     ]
                 )
 
-            assert set(expected) == set(
-                [
-                    file.lstrip("c/")
-                    for file in ls
-                    if (file not in (".zattrs", ".zarray", "zarr.json"))
-                ]
-            )
+            assert set(expected) == {
+                file.lstrip("c/")
+                for file in ls
+                if (file not in (".zattrs", ".zarray", "zarr.json"))
+            }
 
         # The zarr format is set by the `default_zarr_format`
         # pytest fixture that acts on a superclass
@@ -3735,14 +4443,13 @@ class TestZarrWriteEmpty(TestZarrDirecto
                 "0.1.1",
             ]
 
+        # use nan for default fill_value behaviour
+        data = np.array([np.nan, np.nan, 1.0, np.nan]).reshape((1, 2, 2))
+
         if zarr_format_3:
-            data = np.array([0.0, 0, 1.0, 0]).reshape((1, 2, 2))
             # transform to the path style of zarr 3
             # e.g. 0/0/1
             expected = [e.replace(".", "/") for e in expected]
-        else:
-            # use nan for default fill_value behaviour
-            data = np.array([np.nan, np.nan, 1.0, np.nan]).reshape((1, 2, 2))
 
         ds = xr.Dataset(data_vars={"test": (("Z", "Y", "X"), data)})
 
@@ -3752,7 +4459,7 @@ class TestZarrWriteEmpty(TestZarrDirecto
         else:
             encoding = {"test": {"chunks": (1, 1, 1)}}
 
-        with self.temp_dir() as (d, store):
+        with self.temp_dir() as (_d, store):
             ds.to_zarr(
                 store,
                 mode="w",
@@ -3779,20 +4486,20 @@ class TestZarrWriteEmpty(TestZarrDirecto
                 # that was performed by the roundtrip_dir
                 if (write_empty is False) or (write_empty is None and has_zarr_v3):
                     expected.append("1.1.0")
+                elif not has_zarr_v3 or has_zarr_v3_async_oindex:
+                    # this was broken from zarr 3.0.0 until 3.1.2
+                    # async oindex released in 3.1.2 along with a fix
+                    # for write_empty_chunks in append
+                    expected.extend(
+                        [
+                            "1.1.0",
+                            "1.0.0",
+                            "1.0.1",
+                            "1.1.1",
+                        ]
+                    )
                 else:
-                    if not has_zarr_v3:
-                        # TODO: remove zarr3 if once zarr issue is fixed
-                        # https://github.com/zarr-developers/zarr-python/issues/2931
-                        expected.extend(
-                            [
-                                "1.1.0",
-                                "1.0.0",
-                                "1.0.1",
-                                "1.1.1",
-                            ]
-                        )
-                    else:
-                        expected.append("1.1.0")
+                    expected.append("1.1.0")
                 if zarr_format_3:
                     expected = [e.replace(".", "/") for e in expected]
                 assert_expected_files(expected, store)
@@ -3871,7 +4578,7 @@ def test_zarr_version_deprecated() -> No
 
 
 @requires_scipy
-class TestScipyInMemoryData(CFEncodedBase, NetCDF3Only):
+class TestScipyInMemoryData(CFEncodedBase, NetCDF3Only, InMemoryNetCDF):
     engine: T_NetcdfEngine = "scipy"
 
     @contextlib.contextmanager
@@ -3879,20 +4586,28 @@ class TestScipyInMemoryData(CFEncodedBas
         fobj = BytesIO()
         yield backends.ScipyDataStore(fobj, "w")
 
-    def test_to_netcdf_explicit_engine(self) -> None:
-        # regression test for GH1321
-        Dataset({"foo": 42}).to_netcdf(engine="scipy")
+    @contextlib.contextmanager
+    def roundtrip(
+        self, data, save_kwargs=None, open_kwargs=None, allow_cleanup_failure=False
+    ):
+        if save_kwargs is None:
+            save_kwargs = {}
+        if open_kwargs is None:
+            open_kwargs = {}
+        saved = self.save(data, path=None, **save_kwargs)
+        with self.open(saved, **open_kwargs) as ds:
+            yield ds
 
-    def test_bytes_pickle(self) -> None:
-        data = Dataset({"foo": ("x", [1, 2, 3])})
-        fobj = data.to_netcdf()
-        with self.open(fobj) as ds:
-            unpickled = pickle.loads(pickle.dumps(ds))
-            assert_identical(unpickled, data)
+    @pytest.mark.asyncio
+    @pytest.mark.skip(reason="NetCDF backends don't support async loading")
+    async def test_load_async(self) -> None:
+        await super().test_load_async()
 
 
 @requires_scipy
-class TestScipyFileObject(CFEncodedBase, NetCDF3Only):
+class TestScipyFileObject(CFEncodedBase, NetCDF3Only, FileObjectNetCDF):
+    # TODO: Consider consolidating some of these cases (e.g.,
+    # test_file_remains_open) with TestH5NetCDFFileObject
     engine: T_NetcdfEngine = "scipy"
 
     @contextlib.contextmanager
@@ -3915,17 +4630,42 @@ class TestScipyFileObject(CFEncodedBase,
                 with self.open(f, **open_kwargs) as ds:
                     yield ds
 
+    @pytest.mark.asyncio
+    @pytest.mark.skip(reason="NetCDF backends don't support async loading")
+    async def test_load_async(self) -> None:
+        await super().test_load_async()
+
     @pytest.mark.skip(reason="cannot pickle file objects")
     def test_pickle(self) -> None:
-        pass
+        super().test_pickle()
 
     @pytest.mark.skip(reason="cannot pickle file objects")
     def test_pickle_dataarray(self) -> None:
-        pass
+        super().test_pickle_dataarray()
+
+    @pytest.mark.parametrize("create_default_indexes", [True, False])
+    def test_create_default_indexes(self, tmp_path, create_default_indexes) -> None:
+        store_path = tmp_path / "tmp.nc"
+        original_ds = xr.Dataset(
+            {"data": ("x", np.arange(3))}, coords={"x": [-1, 0, 1]}
+        )
+        original_ds.to_netcdf(store_path, engine=self.engine, mode="w")
+
+        with open_dataset(
+            store_path,
+            engine=self.engine,
+            create_default_indexes=create_default_indexes,
+        ) as loaded_ds:
+            if create_default_indexes:
+                assert list(loaded_ds.xindexes) == ["x"] and isinstance(
+                    loaded_ds.xindexes["x"], PandasIndex
+                )
+            else:
+                assert len(loaded_ds.xindexes) == 0
 
 
 @requires_scipy
-class TestScipyFilePath(CFEncodedBase, NetCDF3Only):
+class TestScipyFilePath(NetCDF3Only, CFEncodedBase):
     engine: T_NetcdfEngine = "scipy"
 
     @contextlib.contextmanager
@@ -3962,7 +4702,7 @@ class TestScipyFilePath(CFEncodedBase, N
 
 
 @requires_netCDF4
-class TestNetCDF3ViaNetCDF4Data(CFEncodedBase, NetCDF3Only):
+class TestNetCDF3ViaNetCDF4Data(NetCDF3Only, CFEncodedBase):
     engine: T_NetcdfEngine = "netcdf4"
     file_format: T_NetcdfTypes = "NETCDF3_CLASSIC"
 
@@ -3983,7 +4723,7 @@ class TestNetCDF3ViaNetCDF4Data(CFEncode
 
 
 @requires_netCDF4
-class TestNetCDF4ClassicViaNetCDF4Data(CFEncodedBase, NetCDF3Only):
+class TestNetCDF4ClassicViaNetCDF4Data(NetCDF3Only, CFEncodedBase):
     engine: T_NetcdfEngine = "netcdf4"
     file_format: T_NetcdfTypes = "NETCDF4_CLASSIC"
 
@@ -3997,7 +4737,7 @@ class TestNetCDF4ClassicViaNetCDF4Data(C
 
 
 @requires_scipy_or_netCDF4
-class TestGenericNetCDFData(CFEncodedBase, NetCDF3Only):
+class TestGenericNetCDFData(NetCDF3Only, CFEncodedBase):
     # verify that we can read and write netCDF3 files as long as we have scipy
     # or netCDF4-python installed
     file_format: T_NetcdfTypes = "NETCDF3_64BIT"
@@ -4007,21 +4747,34 @@ class TestGenericNetCDFData(CFEncodedBas
         pass
 
     @requires_scipy
+    @requires_netCDF4
     def test_engine(self) -> None:
         data = create_test_data()
+
         with pytest.raises(ValueError, match=r"unrecognized engine"):
             data.to_netcdf("foo.nc", engine="foobar")  # type: ignore[call-overload]
-        with pytest.raises(ValueError, match=r"invalid engine"):
-            data.to_netcdf(engine="netcdf4")
 
         with create_tmp_file() as tmp_file:
             data.to_netcdf(tmp_file)
             with pytest.raises(ValueError, match=r"unrecognized engine"):
                 open_dataset(tmp_file, engine="foobar")
 
-        netcdf_bytes = data.to_netcdf()
+        with pytest.raises(
+            TypeError,
+            match=re.escape("file objects are not supported by the netCDF4 backend"),
+        ):
+            data.to_netcdf(BytesIO(), engine="netcdf4")
+
+        with pytest.raises(
+            TypeError,
+            match=re.escape("file objects are not supported by the netCDF4 backend"),
+        ):
+            open_dataset(BytesIO(), engine="netcdf4")
+
+        bytes_io = BytesIO()
+        data.to_netcdf(bytes_io, engine="scipy")
         with pytest.raises(ValueError, match=r"unrecognized engine"):
-            open_dataset(BytesIO(netcdf_bytes), engine="foobar")
+            open_dataset(bytes_io, engine="foobar")
 
     def test_cross_engine_read_write_netcdf3(self) -> None:
         data = create_test_data()
@@ -4068,6 +4821,28 @@ class TestGenericNetCDFData(CFEncodedBas
             assert actual.encoding["unlimited_dims"] == set("y")
             assert_equal(ds, actual)
 
+    @requires_scipy
+    def test_roundtrip_via_bytes(self) -> None:
+        original = create_test_data()
+        netcdf_bytes = original.to_netcdf()
+        roundtrip = load_dataset(netcdf_bytes)
+        assert_identical(roundtrip, original)
+
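For reference, the in-memory round trip asserted in test_roundtrip_via_bytes boils down to the following sketch (it assumes scipy is installed, since the in-memory netCDF path in these tests goes through the scipy backend):

    import xarray as xr

    ds = xr.Dataset({"foo": ("x", [1, 2, 3])})
    buf = ds.to_netcdf()             # no target given: returns an in-memory buffer
    restored = xr.load_dataset(buf)  # read it straight back
    xr.testing.assert_identical(ds, restored)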
+    @pytest.mark.xfail(
+        reason="scipy.io.netcdf_file closes files upon garbage collection"
+    )
+    @requires_scipy
+    def test_roundtrip_via_file_object(self) -> None:
+        original = create_test_data()
+        f = BytesIO()
+        original.to_netcdf(f)
+        assert not f.closed
+        restored = open_dataset(f)
+        assert not f.closed
+        assert_identical(restored, original)
+        restored.close()
+        assert not f.closed
+
 
 @requires_h5netcdf
 @requires_netCDF4
@@ -4136,16 +4911,6 @@ class TestH5NetCDFData(NetCDF4Base):
                 expected = Dataset(attrs={"foo": "bar"})
                 assert_identical(expected, actual)
 
-    def test_encoding_unlimited_dims(self) -> None:
-        ds = Dataset({"x": ("y", np.arange(10.0))})
-        with self.roundtrip(ds, save_kwargs=dict(unlimited_dims=["y"])) as actual:
-            assert actual.encoding["unlimited_dims"] == set("y")
-            assert_equal(ds, actual)
-        ds.encoding = {"unlimited_dims": ["y"]}
-        with self.roundtrip(ds) as actual:
-            assert actual.encoding["unlimited_dims"] == set("y")
-            assert_equal(ds, actual)
-
     def test_compression_encoding_h5py(self) -> None:
         ENCODINGS: tuple[tuple[dict[str, Any], dict[str, Any]], ...] = (
             # h5py style compression with gzip codec will be converted to
@@ -4343,20 +5108,19 @@ class TestH5NetCDFAlreadyOpen:
 
 
 @requires_h5netcdf
-class TestH5NetCDFFileObject(TestH5NetCDFData):
+class TestH5NetCDFFileObject(TestH5NetCDFData, FileObjectNetCDF):
     engine: T_NetcdfEngine = "h5netcdf"
 
     def test_open_badbytes(self) -> None:
-        with pytest.raises(ValueError, match=r"HDF5 as bytes"):
-            with open_dataset(b"\211HDF\r\n\032\n", engine="h5netcdf"):  # type: ignore[arg-type]
-                pass
         with pytest.raises(
             ValueError, match=r"match in any of xarray's currently installed IO"
         ):
-            with open_dataset(b"garbage"):  # type: ignore[arg-type]
+            with open_dataset(b"garbage"):
                 pass
-        with pytest.raises(ValueError, match=r"can only read bytes"):
-            with open_dataset(b"garbage", engine="netcdf4"):  # type: ignore[arg-type]
+        with pytest.raises(
+            ValueError, match=r"not the signature of a valid netCDF4 file"
+        ):
+            with open_dataset(b"garbage", engine="h5netcdf"):
                 pass
         with pytest.raises(
             ValueError, match=r"not the signature of a valid netCDF4 file"
@@ -4366,13 +5130,12 @@ class TestH5NetCDFFileObject(TestH5NetCD
 
     def test_open_twice(self) -> None:
         expected = create_test_data()
-        expected.attrs["foo"] = "bar"
         with create_tmp_file() as tmp_file:
-            expected.to_netcdf(tmp_file, engine="h5netcdf")
+            expected.to_netcdf(tmp_file, engine=self.engine)
             with open(tmp_file, "rb") as f:
-                with open_dataset(f, engine="h5netcdf"):
-                    with open_dataset(f, engine="h5netcdf"):
-                        pass
+                with open_dataset(f, engine=self.engine):
+                    with open_dataset(f, engine=self.engine):
+                        pass  # should not crash
 
     @requires_scipy
     def test_open_fileobj(self) -> None:
@@ -4407,6 +5170,25 @@ class TestH5NetCDFFileObject(TestH5NetCD
                 with open_dataset(f):  # ensure file gets closed
                     pass
 
+    @requires_fsspec
+    def test_fsspec(self) -> None:
+        expected = create_test_data()
+        with create_tmp_file() as tmp_file:
+            expected.to_netcdf(tmp_file, engine="h5netcdf")
+
+            with fsspec.open(tmp_file, "rb") as f:
+                with open_dataset(f, engine="h5netcdf") as actual:
+                    assert_identical(actual, expected)
+
+                    # fsspec.open() creates a pickleable file, unlike open()
+                    with pickle.loads(pickle.dumps(actual)) as unpickled:
+                        assert_identical(unpickled, expected)
+
+
+@requires_h5netcdf
+class TestH5NetCDFInMemoryData(InMemoryNetCDFWithGroups):
+    engine: T_NetcdfEngine = "h5netcdf"
+
 
 @requires_h5netcdf
 @requires_dask
@@ -4448,12 +5230,29 @@ class TestH5NetCDFViaDaskData(TestH5NetC
             assert actual["y"].encoding["chunksizes"] == (100, 50)
 
 
+@requires_netCDF4
+@requires_h5netcdf
+def test_memoryview_write_h5netcdf_read_netcdf4() -> None:
+    original = create_test_data()
+    result = original.to_netcdf(engine="h5netcdf")
+    roundtrip = load_dataset(result, engine="netcdf4")
+    assert_identical(roundtrip, original)
+
+
+@requires_netCDF4
+@requires_h5netcdf
+def test_memoryview_write_netcdf4_read_h5netcdf() -> None:
+    original = create_test_data()
+    result = original.to_netcdf(engine="netcdf4")
+    roundtrip = load_dataset(result, engine="h5netcdf")
+    assert_identical(roundtrip, original)
+
+
+@network
 @requires_h5netcdf_ros3
 class TestH5NetCDFDataRos3Driver(TestCommon):
     engine: T_NetcdfEngine = "h5netcdf"
-    test_remote_dataset: str = (
-        "https://www.unidata.ucar.edu/software/netcdf/examples/OMI-Aura_L2-example.nc"
-    )
+    test_remote_dataset: str = "https://archive.unidata.ucar.edu/software/netcdf/examples/OMI-Aura_L2-example.nc"
 
     @pytest.mark.filterwarnings("ignore:Duplicate dimension names")
     def test_get_variable_list(self) -> None:
@@ -4583,11 +5382,9 @@ def test_open_mfdataset_list_attr() -> N
     """
     Case when an attribute of type list differs across the multiple files
     """
-    from netCDF4 import Dataset
-
     with create_tmp_files(2) as nfiles:
         for i in range(2):
-            with Dataset(nfiles[i], "w") as f:
+            with nc4.Dataset(nfiles[i], "w") as f:
                 f.createDimension("x", 3)
                 vlvar = f.createVariable("test_var", np.int32, ("x"))
                 # here create an attribute as a list
@@ -4610,7 +5407,7 @@ class TestOpenMFDatasetWithDataVarsAndCo
     var_name = "v1"
 
     @contextlib.contextmanager
-    def setup_files_and_datasets(self, fuzz=0):
+    def setup_files_and_datasets(self, *, fuzz=0, new_combine_kwargs: bool = False):
         ds1, ds2 = self.gen_datasets_with_common_coord_and_time()
 
         # to test join='exact'
@@ -4622,7 +5419,8 @@ class TestOpenMFDatasetWithDataVarsAndCo
                 ds1.to_netcdf(tmpfile1)
                 ds2.to_netcdf(tmpfile2)
 
-                yield [tmpfile1, tmpfile2], [ds1, ds2]
+                with set_options(use_new_combine_kwarg_defaults=new_combine_kwargs):
+                    yield [tmpfile1, tmpfile2], [ds1, ds2]
 
     def gen_datasets_with_common_coord_and_time(self):
         # create coordinate data
@@ -4659,11 +5457,19 @@ class TestOpenMFDatasetWithDataVarsAndCo
             if combine == "by_coords":
                 files.reverse()
             with open_mfdataset(
-                files, data_vars=opt, combine=combine, concat_dim=concat_dim, join=join
+                files,
+                data_vars=opt,
+                combine=combine,
+                concat_dim=concat_dim,
+                join=join,
+                compat="equals",
             ) as ds:
-                ds_expect = xr.concat([ds1, ds2], data_vars=opt, dim="t", join=join)
+                ds_expect = xr.concat(
+                    [ds1, ds2], data_vars=opt, dim="t", join=join, compat="equals"
+                )
                 assert_identical(ds, ds_expect)
 
+    @pytest.mark.parametrize("use_new_combine_kwarg_defaults", [True, False])
     @pytest.mark.parametrize(
         ["combine_attrs", "attrs", "expected", "expect_error"],
         (
@@ -4691,9 +5497,14 @@ class TestOpenMFDatasetWithDataVarsAndCo
         ),
     )
     def test_open_mfdataset_dataset_combine_attrs(
-        self, combine_attrs, attrs, expected, expect_error
+        self,
+        use_new_combine_kwarg_defaults,
+        combine_attrs,
+        attrs,
+        expected,
+        expect_error,
     ):
-        with self.setup_files_and_datasets() as (files, [ds1, ds2]):
+        with self.setup_files_and_datasets() as (files, [_ds1, _ds2]):
             # Give the files an inconsistent attribute
             for i, f in enumerate(files):
                 ds = open_dataset(f).load()
@@ -4701,28 +5512,34 @@ class TestOpenMFDatasetWithDataVarsAndCo
                 ds.close()
                 ds.to_netcdf(f)
 
-            if expect_error:
-                with pytest.raises(xr.MergeError):
-                    xr.open_mfdataset(
-                        files,
-                        combine="nested",
-                        concat_dim="t",
-                        combine_attrs=combine_attrs,
-                    )
-            else:
-                with xr.open_mfdataset(
-                    files,
-                    combine="nested",
-                    concat_dim="t",
-                    combine_attrs=combine_attrs,
-                ) as ds:
-                    assert ds.attrs == expected
+            with set_options(
+                use_new_combine_kwarg_defaults=use_new_combine_kwarg_defaults
+            ):
+                warning: contextlib.AbstractContextManager = (
+                    pytest.warns(FutureWarning)
+                    if not use_new_combine_kwarg_defaults
+                    else contextlib.nullcontext()
+                )
+                error: contextlib.AbstractContextManager = (
+                    pytest.raises(xr.MergeError)
+                    if expect_error
+                    else contextlib.nullcontext()
+                )
+                with warning:
+                    with error:
+                        with xr.open_mfdataset(
+                            files,
+                            combine="nested",
+                            concat_dim="t",
+                            combine_attrs=combine_attrs,
+                        ) as ds:
+                            assert ds.attrs == expected
 
     def test_open_mfdataset_dataset_attr_by_coords(self) -> None:
         """
         Case when an attribute differs across the multiple files
         """
-        with self.setup_files_and_datasets() as (files, [ds1, ds2]):
+        with self.setup_files_and_datasets() as (files, [_ds1, _ds2]):
             # Give the files an inconsistent attribute
             for i, f in enumerate(files):
                 ds = open_dataset(f).load()
@@ -4730,14 +5547,18 @@ class TestOpenMFDatasetWithDataVarsAndCo
                 ds.close()
                 ds.to_netcdf(f)
 
-            with xr.open_mfdataset(files, combine="nested", concat_dim="t") as ds:
-                assert ds.test_dataset_attr == 10
+            with set_options(use_new_combine_kwarg_defaults=True):
+                with xr.open_mfdataset(files, combine="nested", concat_dim="t") as ds:
+                    assert ds.test_dataset_attr == 10
 
     def test_open_mfdataset_dataarray_attr_by_coords(self) -> None:
         """
         Case when an attribute of a member DataArray differs across the multiple files
         """
-        with self.setup_files_and_datasets() as (files, [ds1, ds2]):
+        with self.setup_files_and_datasets(new_combine_kwargs=True) as (
+            files,
+            [_ds1, _ds2],
+        ):
             # Give the files an inconsistent attribute
             for i, f in enumerate(files):
                 ds = open_dataset(f).load()
@@ -4745,30 +5566,67 @@ class TestOpenMFDatasetWithDataVarsAndCo
                 ds.close()
                 ds.to_netcdf(f)
 
-            with xr.open_mfdataset(files, combine="nested", concat_dim="t") as ds:
-                assert ds["v1"].test_dataarray_attr == 0
+                with xr.open_mfdataset(
+                    files, data_vars=None, combine="nested", concat_dim="t"
+                ) as ds:
+                    assert ds["v1"].test_dataarray_attr == 0
 
     @pytest.mark.parametrize(
         "combine, concat_dim", [("nested", "t"), ("by_coords", None)]
     )
-    @pytest.mark.parametrize("opt", ["all", "minimal", "different"])
+    @pytest.mark.parametrize(
+        "kwargs",
+        [
+            {"data_vars": "all"},
+            {"data_vars": "minimal"},
+            {
+                "data_vars": "all",
+                "coords": "different",
+                "compat": "no_conflicts",
+            },  # old defaults
+            {
+                "data_vars": None,
+                "coords": "minimal",
+                "compat": "override",
+            },  # new defaults
+            {"data_vars": "different", "compat": "no_conflicts"},
+            {},
+        ],
+    )
     def test_open_mfdataset_exact_join_raises_error(
-        self, combine, concat_dim, opt
+        self, combine, concat_dim, kwargs
     ) -> None:
-        with self.setup_files_and_datasets(fuzz=0.1) as (files, [ds1, ds2]):
+        with self.setup_files_and_datasets(fuzz=0.1, new_combine_kwargs=True) as (
+            files,
+            _,
+        ):
             if combine == "by_coords":
                 files.reverse()
+
             with pytest.raises(
-                ValueError, match=r"cannot align objects.*join.*exact.*"
+                ValueError, match="cannot align objects with join='exact'"
             ):
                 open_mfdataset(
                     files,
-                    data_vars=opt,
+                    **kwargs,
                     combine=combine,
                     concat_dim=concat_dim,
                     join="exact",
                 )
 
+    def test_open_mfdataset_defaults_with_exact_join_warns_as_well_as_raising(
+        self,
+    ) -> None:
+        with self.setup_files_and_datasets(fuzz=0.1, new_combine_kwargs=True) as (
+            files,
+            _,
+        ):
+            files.reverse()
+            with pytest.raises(
+                ValueError, match="cannot align objects with join='exact'"
+            ):
+                open_mfdataset(files, combine="by_coords")
+
     def test_common_coord_when_datavars_all(self) -> None:
         opt: Final = "all"
 
@@ -4790,7 +5648,10 @@ class TestOpenMFDatasetWithDataVarsAndCo
     def test_common_coord_when_datavars_minimal(self) -> None:
         opt: Final = "minimal"
 
-        with self.setup_files_and_datasets() as (files, [ds1, ds2]):
+        with self.setup_files_and_datasets(new_combine_kwargs=True) as (
+            files,
+            [ds1, ds2],
+        ):
             # open the files using data_vars option
             with open_mfdataset(
                 files, data_vars=opt, combine="nested", concat_dim="t"
@@ -4816,6 +5677,55 @@ class TestOpenMFDatasetWithDataVarsAndCo
                 with open_mfdataset(files, coords="minimum", combine="by_coords"):
                     pass
 
+    @pytest.mark.parametrize(
+        "combine, concat_dim", [("nested", "t"), ("by_coords", None)]
+    )
+    @pytest.mark.parametrize(
+        "kwargs", [{"data_vars": "different"}, {"coords": "different"}]
+    )
+    def test_open_mfdataset_warns_when_kwargs_set_to_different(
+        self, combine, concat_dim, kwargs
+    ) -> None:
+        with self.setup_files_and_datasets(new_combine_kwargs=True) as (
+            files,
+            [ds1, ds2],
+        ):
+            if combine == "by_coords":
+                files.reverse()
+            with pytest.raises(
+                ValueError, match="Previously the default was `compat='no_conflicts'`"
+            ):
+                open_mfdataset(files, combine=combine, concat_dim=concat_dim, **kwargs)
+            with pytest.raises(
+                ValueError, match="Previously the default was `compat='equals'`"
+            ):
+                xr.concat([ds1, ds2], dim="t", **kwargs)
+
+            with set_options(use_new_combine_kwarg_defaults=False):
+                expectation: contextlib.AbstractContextManager = (
+                    pytest.warns(
+                        FutureWarning,
+                        match="will change from data_vars='all'",
+                    )
+                    if "data_vars" not in kwargs
+                    else contextlib.nullcontext()
+                )
+
+                with pytest.warns(
+                    FutureWarning,
+                    match="will change from compat='equals'",
+                ):
+                    with expectation:
+                        ds_expect = xr.concat([ds1, ds2], dim="t", **kwargs)
+                with pytest.warns(
+                    FutureWarning, match="will change from compat='no_conflicts'"
+                ):
+                    with expectation:
+                        with open_mfdataset(
+                            files, combine=combine, concat_dim=concat_dim, **kwargs
+                        ) as ds:
+                            assert_identical(ds, ds_expect)
+
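The option exercised throughout these combine-kwarg tests can be sketched as follows; the two datasets are illustrative stand-ins:

    import numpy as np
    import xarray as xr

    ds1 = xr.Dataset({"v": ("t", np.arange(3))}, coords={"t": [0, 1, 2]})
    ds2 = xr.Dataset({"v": ("t", np.arange(3))}, coords={"t": [3, 4, 5]})

    # opt in to the new combine keyword defaults instead of relying on the
    # deprecated implicit ones, which the tests above expect to warn
    with xr.set_options(use_new_combine_kwarg_defaults=True):
        combined = xr.concat([ds1, ds2], dim="t")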
 
 @requires_dask
 @requires_scipy
@@ -4978,6 +5888,68 @@ class TestDask(DatasetIOBase):
                 ) as actual:
                     assert_identical(original, actual)
 
+    def test_open_mfdataset_with_ignore(self) -> None:
+        original = Dataset({"foo": ("x", np.random.randn(10))})
+        with create_tmp_files(2) as (tmp1, tmp2):
+            ds1 = original.isel(x=slice(5))
+            ds2 = original.isel(x=slice(5, 10))
+            ds1.to_netcdf(tmp1)
+            ds2.to_netcdf(tmp2)
+            with open_mfdataset(
+                [tmp1, "non-existent-file.nc", tmp2],
+                concat_dim="x",
+                combine="nested",
+                errors="ignore",
+            ) as actual:
+                assert_identical(original, actual)
+
+    def test_open_mfdataset_with_warn(self) -> None:
+        original = Dataset({"foo": ("x", np.random.randn(10))})
+        with pytest.warns(UserWarning, match=r"Ignoring."):
+            with create_tmp_files(2) as (tmp1, tmp2):
+                ds1 = original.isel(x=slice(5))
+                ds2 = original.isel(x=slice(5, 10))
+                ds1.to_netcdf(tmp1)
+                ds2.to_netcdf(tmp2)
+                with open_mfdataset(
+                    [tmp1, "non-existent-file.nc", tmp2],
+                    concat_dim="x",
+                    combine="nested",
+                    errors="warn",
+                ) as actual:
+                    assert_identical(original, actual)
+
+    def test_open_mfdataset_2d_with_ignore(self) -> None:
+        original = Dataset({"foo": (["x", "y"], np.random.randn(10, 8))})
+        with create_tmp_files(4) as (tmp1, tmp2, tmp3, tmp4):
+            original.isel(x=slice(5), y=slice(4)).to_netcdf(tmp1)
+            original.isel(x=slice(5, 10), y=slice(4)).to_netcdf(tmp2)
+            original.isel(x=slice(5), y=slice(4, 8)).to_netcdf(tmp3)
+            original.isel(x=slice(5, 10), y=slice(4, 8)).to_netcdf(tmp4)
+            with open_mfdataset(
+                [[tmp1, tmp2], ["non-existent-file.nc", tmp3, tmp4]],
+                combine="nested",
+                concat_dim=["y", "x"],
+                errors="ignore",
+            ) as actual:
+                assert_identical(original, actual)
+
+    def test_open_mfdataset_2d_with_warn(self) -> None:
+        original = Dataset({"foo": (["x", "y"], np.random.randn(10, 8))})
+        with pytest.warns(UserWarning, match=r"Ignoring."):
+            with create_tmp_files(4) as (tmp1, tmp2, tmp3, tmp4):
+                original.isel(x=slice(5), y=slice(4)).to_netcdf(tmp1)
+                original.isel(x=slice(5, 10), y=slice(4)).to_netcdf(tmp2)
+                original.isel(x=slice(5), y=slice(4, 8)).to_netcdf(tmp3)
+                original.isel(x=slice(5, 10), y=slice(4, 8)).to_netcdf(tmp4)
+                with open_mfdataset(
+                    [[tmp1, tmp2, "non-existent-file.nc"], [tmp3, tmp4]],
+                    combine="nested",
+                    concat_dim=["y", "x"],
+                    errors="warn",
+                ) as actual:
+                    assert_identical(original, actual)
+
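A condensed sketch of the errors= behaviour checked by the four tests above; the file names are hypothetical:

    import xarray as xr

    # errors="ignore" silently skips unreadable members of the list;
    # errors="warn" emits a UserWarning for each skipped file instead
    ds = xr.open_mfdataset(
        ["part-0.nc", "missing.nc", "part-1.nc"],
        combine="nested",
        concat_dim="x",
        errors="ignore",
    )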
     def test_attrs_mfdataset(self) -> None:
         original = Dataset({"foo": ("x", np.random.randn(10))})
         with create_tmp_file() as tmp1:
@@ -5071,11 +6043,48 @@ class TestDask(DatasetIOBase):
                 ds2.t.encoding["units"] = "days since 2000-01-01"
                 ds1.to_netcdf(tmp1)
                 ds2.to_netcdf(tmp2)
-                with open_mfdataset([tmp1, tmp2], combine="nested") as actual:
+                with open_mfdataset(
+                    [tmp1, tmp2], combine="nested", concat_dim="t"
+                ) as actual:
                     assert actual.t.encoding["units"] == original.t.encoding["units"]
                     assert actual.t.encoding["units"] == ds1.t.encoding["units"]
                     assert actual.t.encoding["units"] != ds2.t.encoding["units"]
 
+    def test_encoding_mfdataset_new_defaults(self) -> None:
+        original = Dataset(
+            {
+                "foo": ("t", np.random.randn(10)),
+                "t": ("t", pd.date_range(start="2010-01-01", periods=10, freq="1D")),
+            }
+        )
+        original.t.encoding["units"] = "days since 2010-01-01"
+
+        with create_tmp_file() as tmp1:
+            with create_tmp_file() as tmp2:
+                ds1 = original.isel(t=slice(5))
+                ds2 = original.isel(t=slice(5, 10))
+                ds1.t.encoding["units"] = "days since 2010-01-01"
+                ds2.t.encoding["units"] = "days since 2000-01-01"
+                ds1.to_netcdf(tmp1)
+                ds2.to_netcdf(tmp2)
+
+                for setting in [True, False]:
+                    with set_options(use_new_combine_kwarg_defaults=setting):
+                        with open_mfdataset(
+                            [tmp1, tmp2], combine="nested", concat_dim="t"
+                        ) as old:
+                            assert (
+                                old.t.encoding["units"] == original.t.encoding["units"]
+                            )
+                            assert old.t.encoding["units"] == ds1.t.encoding["units"]
+                            assert old.t.encoding["units"] != ds2.t.encoding["units"]
+
+                with set_options(use_new_combine_kwarg_defaults=True):
+                    with pytest.raises(
+                        AlignmentError, match="If you are intending to concatenate"
+                    ):
+                        open_mfdataset([tmp1, tmp2], combine="nested")
+
     def test_preprocess_mfdataset(self) -> None:
         original = Dataset({"foo": ("x", np.random.randn(10))})
         with create_tmp_file() as tmp:
@@ -5158,25 +6167,21 @@ class TestDask(DatasetIOBase):
                 actual = 1.0 * ds
                 assert_allclose(original, actual, decode_bytes=False)
 
-    def test_open_mfdataset_concat_dim_none(self) -> None:
-        with create_tmp_file() as tmp1:
-            with create_tmp_file() as tmp2:
-                data = Dataset({"x": 0})
-                data.to_netcdf(tmp1)
-                Dataset({"x": np.nan}).to_netcdf(tmp2)
-                with open_mfdataset(
-                    [tmp1, tmp2], concat_dim=None, combine="nested"
-                ) as actual:
-                    assert_identical(data, actual)
-
-    def test_open_mfdataset_concat_dim_default_none(self) -> None:
-        with create_tmp_file() as tmp1:
-            with create_tmp_file() as tmp2:
-                data = Dataset({"x": 0})
-                data.to_netcdf(tmp1)
-                Dataset({"x": np.nan}).to_netcdf(tmp2)
-                with open_mfdataset([tmp1, tmp2], combine="nested") as actual:
-                    assert_identical(data, actual)
+    @pytest.mark.parametrize(
+        "kwargs",
+        [pytest.param({"concat_dim": None}, id="none"), pytest.param({}, id="default")],
+    )
+    def test_open_mfdataset_concat_dim(self, kwargs) -> None:
+        with set_options(use_new_combine_kwarg_defaults=True):
+            with create_tmp_file() as tmp1:
+                with create_tmp_file() as tmp2:
+                    data = Dataset({"x": 0})
+                    data.to_netcdf(tmp1)
+                    Dataset({"x": np.nan}).to_netcdf(tmp2)
+                    with open_mfdataset(
+                        [tmp1, tmp2], **kwargs, combine="nested"
+                    ) as actual:
+                        assert_identical(data, actual)
 
     def test_open_dataset(self) -> None:
         original = Dataset({"foo": ("x", np.random.randn(10))})
@@ -5203,7 +6208,9 @@ class TestDask(DatasetIOBase):
         )
         with create_tmp_file() as tmp:
             original.to_netcdf(tmp)
-            with open_mfdataset([tmp], concat_dim=dim, combine="nested") as actual:
+            with open_mfdataset(
+                [tmp], concat_dim=dim, data_vars="all", combine="nested"
+            ) as actual:
                 assert_identical(expected, actual)
 
     def test_open_multi_dataset(self) -> None:
@@ -5227,7 +6234,7 @@ class TestDask(DatasetIOBase):
             original.to_netcdf(tmp1)
             original.to_netcdf(tmp2)
             with open_mfdataset(
-                [tmp1, tmp2], concat_dim=dim, combine="nested"
+                [tmp1, tmp2], concat_dim=dim, data_vars="all", combine="nested"
             ) as actual:
                 assert_identical(expected, actual)
 
@@ -5290,17 +6297,29 @@ class TestDask(DatasetIOBase):
             original = Dataset({"foo": ("x", np.random.randn(10))})
             original.to_netcdf(tmp)
             ds = load_dataset(tmp)
+            assert_identical(original, ds)
             # this would fail if we used open_dataset instead of load_dataset
             ds.to_netcdf(tmp)
 
     def test_load_dataarray(self) -> None:
         with create_tmp_file() as tmp:
-            original = Dataset({"foo": ("x", np.random.randn(10))})
+            original = DataArray(np.random.randn(10), dims=["x"])
             original.to_netcdf(tmp)
-            ds = load_dataarray(tmp)
+            da = load_dataarray(tmp)
+            assert_identical(original, da)
             # this would fail if we used open_dataarray instead of
             # load_dataarray
-            ds.to_netcdf(tmp)
+            da.to_netcdf(tmp)
+
+    def test_load_datatree(self) -> None:
+        with create_tmp_file() as tmp:
+            original = DataTree(Dataset({"foo": ("x", np.random.randn(10))}))
+            original.to_netcdf(tmp)
+            dt = load_datatree(tmp)
+            xr.testing.assert_identical(original, dt)
+            # this would fail if we used open_datatree instead of
+            # load_datatree
+            dt.to_netcdf(tmp)
 
     @pytest.mark.skipif(
         ON_WINDOWS,
@@ -5335,30 +6354,27 @@ class TestDask(DatasetIOBase):
 @pytest.mark.filterwarnings("ignore:The binary mode of fromstring is deprecated")
 class TestPydap:
     def convert_to_pydap_dataset(self, original):
-        from pydap.model import BaseType, DatasetType, GridType
+        from pydap.model import BaseType, DatasetType
 
         ds = DatasetType("bears", **original.attrs)
         for key, var in original.data_vars.items():
-            v = GridType(key)
-            v[key] = BaseType(key, var.values, dimensions=var.dims, **var.attrs)
-            for d in var.dims:
-                v[d] = BaseType(d, var[d].values)
-            ds[key] = v
+            ds[key] = BaseType(
+                key, var.values, dtype=var.values.dtype.kind, dims=var.dims, **var.attrs
+            )
         # check all dims are stored in ds
         for d in original.coords:
-            ds[d] = BaseType(
-                d, original[d].values, dimensions=(d,), **original[d].attrs
-            )
+            ds[d] = BaseType(d, original[d].values, dims=(d,), **original[d].attrs)
         return ds
 
     @contextlib.contextmanager
     def create_datasets(self, **kwargs):
         with open_example_dataset("bears.nc") as expected:
+            # print("QQ0:", expected["bears"].load())
             pydap_ds = self.convert_to_pydap_dataset(expected)
             actual = open_dataset(PydapDataStore(pydap_ds))
-            # TODO solve this workaround:
             # netcdf converts string to byte not unicode
-            expected["bears"] = expected["bears"].astype(str)
+            # fixed in pydap 3.5.6: https://github.com/pydap/pydap/issues/510
+            actual["bears"].values = actual["bears"].values.astype("S")
             yield actual, expected
 
     def test_cmp_local_file(self) -> None:
@@ -5372,15 +6388,15 @@ class TestPydap:
             # we don't check attributes exactly with assertDatasetIdentical()
             # because the test DAP server seems to insert some extra
             # attributes not found in the netCDF file.
-            # 2025/03/18 : The DAP server now modifies the keys too
-            # assert actual.attrs.keys() == expected.attrs.keys()
-            assert len(actual.attrs.keys()) == len(expected.attrs.keys())
+            assert actual.attrs.keys() == expected.attrs.keys()
 
         with self.create_datasets() as (actual, expected):
             assert_equal(actual[{"l": 2}], expected[{"l": 2}])
 
         with self.create_datasets() as (actual, expected):
-            assert_equal(actual.isel(i=0, j=-1), expected.isel(i=0, j=-1))
+            # always return arrays and not scalars
+            # scalars will be promoted to unicode for numpy >= 2.3.0
+            assert_equal(actual.isel(i=[0], j=[-1]), expected.isel(i=[0], j=[-1]))
 
         with self.create_datasets() as (actual, expected):
             assert_equal(actual.isel(j=slice(1, 2)), expected.isel(j=slice(1, 2)))
@@ -5402,7 +6418,6 @@ class TestPydap:
             with create_tmp_file() as tmp_file:
                 actual.to_netcdf(tmp_file)
                 with open_dataset(tmp_file) as actual2:
-                    actual2["bears"] = actual2["bears"].astype(str)
                     assert_equal(actual2, expected)
 
     @requires_dask
@@ -5416,26 +6431,45 @@ class TestPydap:
 @requires_pydap
 class TestPydapOnline(TestPydap):
     @contextlib.contextmanager
-    def create_datasets(self, **kwargs):
+    def create_dap2_datasets(self, **kwargs):
+        # in pydap 3.5.0, URLs default to DAP2.
         url = "http://test.opendap.org/opendap/data/nc/bears.nc"
         actual = open_dataset(url, engine="pydap", **kwargs)
+        # pydap <3.5.6 converts to unicode dtype "|U", which is not what xarray
+        # expects, so force the bytes dtype here; pydap >=3.5.6 no longer
+        # converts. https://github.com/pydap/pydap/issues/510
+        actual["bears"].values = actual["bears"].values.astype("S")
+        with open_example_dataset("bears.nc") as expected:
+            yield actual, expected
+
+    def output_grid_deprecation_warning_dap2dataset(self):
+        with pytest.warns(DeprecationWarning, match="`output_grid` is deprecated"):
+            with self.create_dap2_datasets(output_grid=True) as (actual, expected):
+                assert_equal(actual, expected)
+
+    @contextlib.contextmanager
+    def create_dap4_dataset(self, **kwargs):
+        url = "dap4://test.opendap.org/opendap/data/nc/bears.nc"
+        actual = open_dataset(url, engine="pydap", **kwargs)
         with open_example_dataset("bears.nc") as expected:
             # workaround to restore string which is converted to byte
-            expected["bears"] = expected["bears"].astype(str)
+            # only needed for pydap <3.5.6 https://github.com/pydap/pydap/issues/510
+            expected["bears"].values = expected["bears"].values.astype("S")
             yield actual, expected
 
     def test_session(self) -> None:
-        from pydap.cas.urs import setup_session
+        from requests import Session
 
-        session = setup_session("XarrayTestUser", "Xarray2017")
+        session = Session()  # blank requests.Session object
         with mock.patch("pydap.client.open_url") as mock_func:
             xr.backends.PydapDataStore.open("http://test.url", session=session)
         mock_func.assert_called_with(
             url="http://test.url",
             application=None,
             session=session,
-            output_grid=True,
+            output_grid=False,
             timeout=120,
+            verify=True,
+            user_charset=None,
         )
 
 
@@ -5613,8 +6647,8 @@ class TestDataArrayToNetCDF:
     def test_dataarray_to_netcdf_return_bytes(self) -> None:
         # regression test for GH1410
         data = xr.DataArray([1, 2, 3])
-        output = data.to_netcdf()
-        assert isinstance(output, bytes)
+        output = data.to_netcdf(engine="scipy")
+        assert isinstance(output, memoryview)
 
     def test_dataarray_to_netcdf_no_name_pathlib(self) -> None:
         original_da = DataArray(np.arange(12).reshape((3, 4)))
@@ -5687,6 +6721,27 @@ class TestDataArrayToZarr:
         with open_dataarray(tmp_store, engine="zarr") as loaded_da:
             assert_identical(original_da, loaded_da)
 
+    @requires_dask
+    def test_dataarray_to_zarr_align_chunks_true(self, tmp_store) -> None:
+        # TODO: Improve data integrity checks when using Dask.
+        #   Detecting automatic alignment issues in Dask can be tricky,
+        #   as unintended misalignment might lead to subtle data corruption.
+        #   For now, ensure that the parameter is present, but explore
+        #   more robust verification methods to confirm data consistency.
+
+        skip_if_zarr_format_3("this test currently only targets Zarr format 2 stores")
+        arr = DataArray(
+            np.arange(4), dims=["a"], coords={"a": np.arange(4)}, name="foo"
+        ).chunk(a=(2, 1, 1))
+
+        arr.to_zarr(
+            tmp_store,
+            align_chunks=True,
+            encoding={"foo": {"chunks": (3,)}},
+        )
+        with open_dataarray(tmp_store, engine="zarr") as loaded_da:
+            assert_identical(arr, loaded_da)
+
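A user-level sketch of align_chunks, with a hypothetical store path; the intent is that dask chunks which straddle the requested zarr chunks get realigned on write instead of triggering the "would overlap multiple Dask chunks" error tested earlier in this file:

    import numpy as np
    import xarray as xr

    da = xr.DataArray(np.arange(4), dims="a", name="foo").chunk(a=(2, 1, 1))
    # dask chunks (2, 1, 1) do not line up with the requested zarr chunks of 3
    da.to_zarr("aligned.zarr", align_chunks=True, encoding={"foo": {"chunks": (3,)}})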
 
 @requires_scipy_or_netCDF4
 def test_no_warning_from_dask_effective_get() -> None:
@@ -5742,6 +6797,7 @@ def _assert_no_dates_out_of_range_warnin
 
 
 @requires_scipy_or_netCDF4
+@pytest.mark.filterwarnings("ignore:deallocating CachingFileManager")
 @pytest.mark.parametrize("calendar", _STANDARD_CALENDARS)
 def test_use_cftime_standard_calendar_default_in_range(calendar) -> None:
     x = [0, 1]
@@ -5864,6 +6920,7 @@ def test_use_cftime_false_standard_calen
 
 
 @requires_scipy_or_netCDF4
+@pytest.mark.filterwarnings("ignore:deallocating CachingFileManager")
 @pytest.mark.parametrize("calendar", ["standard", "gregorian"])
 def test_use_cftime_false_standard_calendar_out_of_range(calendar) -> None:
     x = [0, 1]
@@ -5876,12 +6933,13 @@ def test_use_cftime_false_standard_calen
 
     with create_tmp_file() as tmp_file:
         original.to_netcdf(tmp_file)
+        decoder = CFDatetimeCoder(use_cftime=False)
         with pytest.raises((OutOfBoundsDatetime, ValueError)):
-            decoder = CFDatetimeCoder(use_cftime=False)
             open_dataset(tmp_file, decode_times=decoder)
 
 
 @requires_scipy_or_netCDF4
+@pytest.mark.filterwarnings("ignore:deallocating CachingFileManager")
 @pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS)
 @pytest.mark.parametrize("units_year", [1500, 2000, 2500])
 def test_use_cftime_false_nonstandard_calendar(calendar, units_year) -> None:
@@ -5895,8 +6953,8 @@ def test_use_cftime_false_nonstandard_ca
 
     with create_tmp_file() as tmp_file:
         original.to_netcdf(tmp_file)
+        decoder = CFDatetimeCoder(use_cftime=False)
         with pytest.raises((OutOfBoundsDatetime, ValueError)):
-            decoder = CFDatetimeCoder(use_cftime=False)
             open_dataset(tmp_file, decode_times=decoder)
 
 
@@ -5959,8 +7017,12 @@ def test_extract_zarr_variable_encoding(
 def test_open_fsspec() -> None:
     import fsspec
 
-    if not hasattr(zarr.storage, "FSStore") or not hasattr(
-        zarr.storage.FSStore, "getitems"
+    if not (
+        (
+            hasattr(zarr.storage, "FSStore")
+            and hasattr(zarr.storage.FSStore, "getitems")
+        )  # zarr v2
+        or hasattr(zarr.storage, "FsspecStore")  # zarr v3
     ):
         pytest.skip("zarr too old")
 
@@ -6111,6 +7173,9 @@ def test_netcdf4_entrypoint(tmp_path: Pa
     assert entrypoint.guess_can_open("something-local.cdf")
     assert not entrypoint.guess_can_open("not-found-and-no-extension")
 
+    contents = ds.to_netcdf(engine="netcdf4")
+    _check_guess_can_open_and_open(entrypoint, contents, engine="netcdf4", expected=ds)
+
     path = tmp_path / "baz"
     with open(path, "wb") as f:
         f.write(b"not-a-netcdf-file")
@@ -6144,7 +7209,7 @@ def test_scipy_entrypoint(tmp_path: Path
     assert entrypoint.guess_can_open("something-local.nc")
     assert entrypoint.guess_can_open("something-local.nc.gz")
     assert not entrypoint.guess_can_open("not-found-and-no-extension")
-    assert not entrypoint.guess_can_open(b"not-a-netcdf-file")  # type: ignore[arg-type]
+    assert not entrypoint.guess_can_open(b"not-a-netcdf-file")
 
 
 @requires_h5netcdf
@@ -6161,17 +7226,43 @@ def test_h5netcdf_entrypoint(tmp_path: P
     with open(path, "rb") as f:
         _check_guess_can_open_and_open(entrypoint, f, engine="h5netcdf", expected=ds)
 
+    contents = ds.to_netcdf(engine="h5netcdf")
+    _check_guess_can_open_and_open(entrypoint, contents, engine="h5netcdf", expected=ds)
+
     assert entrypoint.guess_can_open("something-local.nc")
     assert entrypoint.guess_can_open("something-local.nc4")
     assert entrypoint.guess_can_open("something-local.cdf")
     assert not entrypoint.guess_can_open("not-found-and-no-extension")
 
 
+@requires_zarr
+def test_zarr_entrypoint(tmp_path: Path) -> None:
+    from xarray.backends.zarr import ZarrBackendEntrypoint
+
+    entrypoint = ZarrBackendEntrypoint()
+    ds = create_test_data()
+
+    path = tmp_path / "foo.zarr"
+    ds.to_zarr(path)
+    _check_guess_can_open_and_open(entrypoint, path, engine="zarr", expected=ds)
+    _check_guess_can_open_and_open(entrypoint, str(path), engine="zarr", expected=ds)
+
+    # add a trailing slash to the path and check again
+    _check_guess_can_open_and_open(
+        entrypoint, str(path) + "/", engine="zarr", expected=ds
+    )
+
+    # guess_can_open should recognise .zarr paths, with or without a trailing slash
+    assert entrypoint.guess_can_open("something-local.zarr")
+    assert entrypoint.guess_can_open("something-local.zarr/")  # With trailing slash
+    assert not entrypoint.guess_can_open("something-local.nc")
+    assert not entrypoint.guess_can_open("not-found-and-no-extension")
+    assert not entrypoint.guess_can_open("something.zarr.txt")
+
+
 @requires_netCDF4
 @pytest.mark.parametrize("str_type", (str, np.str_))
-def test_write_file_from_np_str(
-    str_type: type[str] | type[np.str_], tmpdir: str
-) -> None:
+def test_write_file_from_np_str(str_type: type[str | np.str_], tmpdir: str) -> None:
     # https://github.com/pydata/xarray/pull/5264
     scenarios = [str_type(v) for v in ["scenario_a", "scenario_b", "scenario_c"]]
     years = range(2015, 2100 + 1)
@@ -6210,6 +7301,7 @@ class TestNCZarr:
         # https://github.com/Unidata/netcdf-c/issues/2259
         ds = ds.drop_vars("dim3")
 
+        # engine="netcdf4" is not required for backwards compatibility
         ds.to_netcdf(f"file://{filename}#mode=nczarr")
         return ds
 
@@ -6261,6 +7353,24 @@ def test_zarr_closing_internal_zip_store
 
 
 @requires_zarr
+@pytest.mark.parametrize("create_default_indexes", [True, False])
+def test_zarr_create_default_indexes(tmp_path, create_default_indexes) -> None:
+    store_path = tmp_path / "tmp.zarr"
+    original_ds = xr.Dataset({"data": ("x", np.arange(3))}, coords={"x": [-1, 0, 1]})
+    original_ds.to_zarr(store_path, mode="w")
+
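+    # With create_default_indexes=True a PandasIndex is built for the "x"
+    # coordinate on open; with False the coordinate is loaded without any index.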
+    with open_dataset(
+        store_path, engine="zarr", create_default_indexes=create_default_indexes
+    ) as loaded_ds:
+        if create_default_indexes:
+            assert list(loaded_ds.xindexes) == ["x"] and isinstance(
+                loaded_ds.xindexes["x"], PandasIndex
+            )
+        else:
+            assert len(loaded_ds.xindexes) == 0
+
+
+@requires_zarr
 @pytest.mark.usefixtures("default_zarr_format")
 def test_raises_key_error_on_invalid_zarr_store(tmp_path):
     root = zarr.open_group(tmp_path / "tmp.zarr")
@@ -6426,7 +7536,7 @@ class TestZarrRegionAuto:
                 )
 
             # chunking with dask sidesteps the encoding check, so we need a different check
-            with pytest.raises(ValueError, match="Specified zarr chunks"):
+            with pytest.raises(ValueError, match="Specified Zarr chunks"):
                 self.save(
                     target,
                     da2.chunk({"x": 1, "y": 1, "time": 1}),
@@ -6434,6 +7544,10 @@ class TestZarrRegionAuto:
                     mode="a",
                 )
 
+    @pytest.mark.xfail(
+        ON_WINDOWS,
+        reason="Permission errors from Zarr: https://github.com/pydata/xarray/pull/10793",
+    )
     @requires_dask
     def test_zarr_region_chunk_partial_offset(self):
         # https://github.com/pydata/xarray/pull/8459#issuecomment-1819417545
@@ -6694,6 +7808,54 @@ class TestZarrRegionAuto:
             chunk = chunk.chunk()
             self.save(store, chunk.chunk(), region=region)
 
+    @requires_dask
+    def test_dataset_to_zarr_align_chunks_true(self, tmp_store) -> None:
+        # This test is a replica of the one in `test_dataarray_to_zarr_align_chunks_true`
+        # but for datasets
+        with self.create_zarr_target() as store:
+            ds = (
+                DataArray(
+                    np.arange(4).reshape((2, 2)),
+                    dims=["a", "b"],
+                    coords={
+                        "a": np.arange(2),
+                        "b": np.arange(2),
+                    },
+                )
+                .chunk(a=(1, 1), b=(1, 1))
+                .to_dataset(name="foo")
+            )
+
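+            # The (1, 1) Dask chunks do not match the requested on-disk chunks
+            # of (3, 3); align_chunks=True realigns them during the write.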
+            self.save(
+                store,
+                ds,
+                align_chunks=True,
+                encoding={"foo": {"chunks": (3, 3)}},
+                mode="w",
+            )
+            assert_identical(ds, xr.open_zarr(store))
+
+            ds = (
+                DataArray(
+                    np.arange(4, 8).reshape((2, 2)),
+                    dims=["a", "b"],
+                    coords={
+                        "a": np.arange(2),
+                        "b": np.arange(2),
+                    },
+                )
+                .chunk(a=(1, 1), b=(1, 1))
+                .to_dataset(name="foo")
+            )
+
+            self.save(
+                store,
+                ds,
+                align_chunks=True,
+                region="auto",
+            )
+            assert_identical(ds, xr.open_zarr(store))
+
 
 @requires_h5netcdf
 @requires_fsspec
@@ -6710,7 +7872,8 @@ def test_h5netcdf_storage_options() -> N
             files,
             engine="h5netcdf",
             concat_dim="time",
+            data_vars="all",
             combine="nested",
             storage_options={"skip_instance_cache": False},
         ) as ds:
-            assert_identical(xr.concat([ds1, ds2], dim="time"), ds)
+            assert_identical(xr.concat([ds1, ds2], dim="time", data_vars="all"), ds)
diff -pruN 2025.03.1-8/xarray/tests/test_backends_api.py 2025.10.1-1/xarray/tests/test_backends_api.py
--- 2025.03.1-8/xarray/tests/test_backends_api.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_backends_api.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,16 +1,20 @@
 from __future__ import annotations
 
+import io
+import re
+import sys
 from numbers import Number
 
 import numpy as np
 import pytest
 
 import xarray as xr
-from xarray.backends.api import _get_default_engine
+from xarray.backends.writers import get_default_netcdf_write_engine
 from xarray.tests import (
     assert_identical,
     assert_no_warnings,
     requires_dask,
+    requires_h5netcdf,
     requires_netCDF4,
     requires_scipy,
 )
@@ -18,15 +22,97 @@ from xarray.tests import (
 
 @requires_netCDF4
 @requires_scipy
-def test__get_default_engine() -> None:
-    engine_remote = _get_default_engine("http://example.org/test.nc", allow_remote=True)
-    assert engine_remote == "netcdf4"
+@requires_h5netcdf
+def test_get_default_netcdf_write_engine() -> None:
+    assert xr.get_options()["netcdf_engine_order"] == ("netcdf4", "h5netcdf", "scipy")
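+    # With a path target the first engine in the preferred order that supports
+    # the requested format wins; with an in-memory (BytesIO) target netcdf4 is
+    # skipped, so NETCDF4 formats go to h5netcdf and NETCDF3 formats to scipy.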
 
-    engine_gz = _get_default_engine("/example.gz")
-    assert engine_gz == "scipy"
+    engine = get_default_netcdf_write_engine("", format=None)
+    assert engine == "netcdf4"
 
-    engine_default = _get_default_engine("/example")
-    assert engine_default == "netcdf4"
+    engine = get_default_netcdf_write_engine("", format="NETCDF4")
+    assert engine == "netcdf4"
+
+    engine = get_default_netcdf_write_engine("", format="NETCDF4_CLASSIC")
+    assert engine == "netcdf4"
+
+    engine = get_default_netcdf_write_engine("", format="NETCDF3_CLASSIC")
+    assert engine == "netcdf4"
+
+    engine = get_default_netcdf_write_engine(io.BytesIO(), format=None)
+    assert engine == "h5netcdf"
+
+    engine = get_default_netcdf_write_engine(io.BytesIO(), format="NETCDF4")
+    assert engine == "h5netcdf"
+
+    engine = get_default_netcdf_write_engine(io.BytesIO(), format="NETCDF3_CLASSIC")
+    assert engine == "scipy"
+
+    engine = get_default_netcdf_write_engine("path.zarr#mode=nczarr", format=None)
+    assert engine == "netcdf4"
+
+    with xr.set_options(netcdf_engine_order=["netcdf4", "scipy", "h5netcdf"]):
+        engine = get_default_netcdf_write_engine(io.BytesIO(), format=None)
+        assert engine == "scipy"
+
+        engine = get_default_netcdf_write_engine(io.BytesIO(), format="NETCDF4")
+        assert engine == "h5netcdf"
+
+        engine = get_default_netcdf_write_engine(io.BytesIO(), format="NETCDF3_CLASSIC")
+        assert engine == "scipy"
+
+    with xr.set_options(netcdf_engine_order=["h5netcdf", "scipy", "netcdf4"]):
+        engine = get_default_netcdf_write_engine("", format=None)
+        assert engine == "h5netcdf"
+
+        engine = get_default_netcdf_write_engine("", format="NETCDF4")
+        assert engine == "h5netcdf"
+
+        engine = get_default_netcdf_write_engine("", format="NETCDF4_CLASSIC")
+        assert engine == "netcdf4"
+
+        engine = get_default_netcdf_write_engine(io.BytesIO(), format="NETCDF4")
+        assert engine == "h5netcdf"
+
+        engine = get_default_netcdf_write_engine("", format="NETCDF3_CLASSIC")
+        assert engine == "scipy"
+
+        engine = get_default_netcdf_write_engine(io.BytesIO(), format="NETCDF3_CLASSIC")
+        assert engine == "scipy"
+
+
+@requires_h5netcdf
+def test_default_engine_h5netcdf(monkeypatch):
+    """Test the default netcdf engine when h5netcdf is the only importable module."""
+
+    monkeypatch.delitem(sys.modules, "netCDF4", raising=False)
+    monkeypatch.delitem(sys.modules, "scipy", raising=False)
+    monkeypatch.setattr(sys, "meta_path", [])
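+    # Removing the modules and clearing sys.meta_path means netCDF4 and scipy
+    # can no longer be imported, leaving the already-imported h5netcdf as the
+    # only usable backend.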
+
+    engine = get_default_netcdf_write_engine("", format=None)
+    assert engine == "h5netcdf"
+
+    with pytest.raises(
+        ValueError,
+        match=re.escape(
+            "cannot write NetCDF files with format='NETCDF3_CLASSIC' because "
+            "none of the suitable backend libraries (SUITABLE_BACKENDS) are installed"
+        ).replace("SUITABLE_BACKENDS", r"(scipy, netCDF4)|(netCDF4, scipy)"),
+    ):
+        get_default_netcdf_write_engine("", format="NETCDF3_CLASSIC")
+
+
+def test_default_engine_nczarr_no_netcdf4_python(monkeypatch):
+    monkeypatch.delitem(sys.modules, "netCDF4", raising=False)
+    monkeypatch.setattr(sys, "meta_path", [])
+
+    with pytest.raises(
+        ValueError,
+        match=re.escape(
+            "cannot write NetCDF files in NCZarr format because "
+            "none of the suitable backend libraries (netCDF4) are installed"
+        ),
+    ):
+        get_default_netcdf_write_engine("#mode=nczarr", format=None)
 
 
 def test_custom_engine() -> None:
@@ -201,3 +287,39 @@ class TestPreferredChunks:
                 chunks=dict(zip(initial[self.var_name].dims, req_chunks, strict=True)),
             )
         self.check_dataset(initial, final, explicit_chunks(req_chunks, shape))
+
+    @pytest.mark.parametrize("create_default_indexes", [True, False])
+    def test_default_indexes(self, create_default_indexes):
+        """Create default indexes if the backend does not create them."""
+        coords = xr.Coordinates({"x": ("x", [0, 1]), "y": list("abc")}, indexes={})
+        initial = xr.Dataset({"a": ("x", [1, 2])}, coords=coords)
+
+        with assert_no_warnings():
+            final = xr.open_dataset(
+                initial,
+                engine=PassThroughBackendEntrypoint,
+                create_default_indexes=create_default_indexes,
+            )
+
+        if create_default_indexes:
+            assert all(name in final.xindexes for name in ["x", "y"])
+        else:
+            assert len(final.xindexes) == 0
+
+    @pytest.mark.parametrize("create_default_indexes", [True, False])
+    def test_default_indexes_passthrough(self, create_default_indexes):
+        """Allow creating indexes in the backend."""
+
+        initial = xr.Dataset(
+            {"a": (["x", "y"], [[1, 2, 3], [4, 5, 6]])},
+            coords={"x": ("x", [0, 1]), "y": ("y", list("abc"))},
+        ).stack(z=["x", "y"])
+
+        with assert_no_warnings():
+            final = xr.open_dataset(
+                initial,
+                engine=PassThroughBackendEntrypoint,
+                create_default_indexes=create_default_indexes,
+            )
+
+        assert initial.coords.equals(final.coords)
diff -pruN 2025.03.1-8/xarray/tests/test_backends_chunks.py 2025.10.1-1/xarray/tests/test_backends_chunks.py
--- 2025.03.1-8/xarray/tests/test_backends_chunks.py	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_backends_chunks.py	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,116 @@
+import numpy as np
+import pytest
+
+import xarray as xr
+from xarray.backends.chunks import align_nd_chunks, build_grid_chunks, grid_rechunk
+from xarray.tests import requires_dask
+
+
+@pytest.mark.parametrize(
+    "size, chunk_size, region, expected_chunks",
+    [
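+        # Each case: total size, chunk size, region, and the expected grid
+        # chunks covering that region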
+        (10, 3, slice(1, 11), (2, 3, 3, 2)),
+        (10, 3, slice(None, None), (3, 3, 3, 1)),
+        (10, 3, None, (3, 3, 3, 1)),
+        (10, 3, slice(None, 10), (3, 3, 3, 1)),
+        (10, 3, slice(0, None), (3, 3, 3, 1)),
+        (2, 10, slice(0, 3), (2,)),
+        (4, 10, slice(7, 10), (3, 1)),
+    ],
+)
+def test_build_grid_chunks(size, chunk_size, region, expected_chunks):
+    grid_chunks = build_grid_chunks(
+        size,
+        chunk_size=chunk_size,
+        region=region,
+    )
+    assert grid_chunks == expected_chunks
+
+
+@pytest.mark.parametrize(
+    "nd_v_chunks, nd_backend_chunks, expected_chunks",
+    [
+        (((2, 2, 2, 2),), ((3, 3, 2),), ((3, 3, 2),)),
+        # ND cases
+        (((2, 4), (2, 3)), ((2, 2, 2), (3, 2)), ((2, 4), (3, 2))),
+    ],
+)
+def test_align_nd_chunks(nd_v_chunks, nd_backend_chunks, expected_chunks):
+    aligned_nd_chunks = align_nd_chunks(
+        nd_v_chunks=nd_v_chunks,
+        nd_backend_chunks=nd_backend_chunks,
+    )
+    assert aligned_nd_chunks == expected_chunks
+
+
+@requires_dask
+@pytest.mark.parametrize(
+    "enc_chunks, region, nd_v_chunks, expected_chunks",
+    [
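+        # Each case: encoded (on-disk) chunks, write region, current variable
+        # chunks, and the expected chunks after grid_rechunk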
+        (
+            (3,),
+            (slice(2, 14),),
+            ((6, 6),),
+            (
+                (
+                    4,
+                    6,
+                    2,
+                ),
+            ),
+        ),
+        (
+            (6,),
+            (slice(0, 13),),
+            ((6, 7),),
+            (
+                (
+                    6,
+                    7,
+                ),
+            ),
+        ),
+        ((6,), (slice(0, 13),), ((6, 6, 1),), ((6, 6, 1),)),
+        ((3,), (slice(2, 14),), ((1, 3, 2, 6),), ((1, 3, 6, 2),)),
+        ((3,), (slice(2, 14),), ((2, 2, 2, 6),), ((4, 6, 2),)),
+        ((3,), (slice(2, 14),), ((3, 1, 3, 5),), ((4, 3, 5),)),
+        ((4,), (slice(1, 13),), ((1, 1, 1, 4, 3, 2),), ((3, 4, 4, 1),)),
+        ((5,), (slice(4, 16),), ((5, 7),), ((6, 6),)),
+        # ND cases
+        (
+            (3, 6),
+            (slice(2, 14), slice(0, 13)),
+            ((6, 6), (6, 7)),
+            (
+                (
+                    4,
+                    6,
+                    2,
+                ),
+                (
+                    6,
+                    7,
+                ),
+            ),
+        ),
+    ],
+)
+def test_grid_rechunk(enc_chunks, region, nd_v_chunks, expected_chunks):
+    dims = [f"dim_{i}" for i in range(len(region))]
+    coords = {
+        dim: list(range(r.start, r.stop)) for dim, r in zip(dims, region, strict=False)
+    }
+    shape = tuple(r.stop - r.start for r in region)
+    arr = xr.DataArray(
+        np.arange(np.prod(shape)).reshape(shape),
+        dims=dims,
+        coords=coords,
+    )
+    arr = arr.chunk(dict(zip(dims, nd_v_chunks, strict=False)))
+
+    result = grid_rechunk(
+        arr.variable,
+        enc_chunks=enc_chunks,
+        region=region,
+    )
+    assert result.chunks == expected_chunks
diff -pruN 2025.03.1-8/xarray/tests/test_backends_common.py 2025.10.1-1/xarray/tests/test_backends_common.py
--- 2025.03.1-8/xarray/tests/test_backends_common.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_backends_common.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,9 +1,14 @@
 from __future__ import annotations
 
+import io
+import re
+
 import numpy as np
 import pytest
 
+import xarray as xr
 from xarray.backends.common import _infer_dtype, robust_getitem
+from xarray.tests import requires_scipy
 
 
 class DummyFailure(Exception):
@@ -43,3 +48,17 @@ def test_robust_getitem() -> None:
 def test_infer_dtype_error_on_mixed_types(data):
     with pytest.raises(ValueError, match="unable to infer dtype on variable"):
         _infer_dtype(data, "test")
+
+
+@requires_scipy
+def test_encoding_failure_note():
+    # Create an arbitrary value that cannot be encoded in netCDF3
+    ds = xr.Dataset({"invalid": np.array([2**63 - 1], dtype=np.int64)})
+    f = io.BytesIO()
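+    # The backend error should be annotated with a note naming the offending variable.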
+    with pytest.raises(
+        ValueError,
+        match=re.escape(
+            "Raised while encoding variable 'invalid' with value <xarray.Variable"
+        ),
+    ):
+        ds.to_netcdf(f, engine="scipy")
diff -pruN 2025.03.1-8/xarray/tests/test_backends_datatree.py 2025.10.1-1/xarray/tests/test_backends_datatree.py
--- 2025.03.1-8/xarray/tests/test_backends_datatree.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_backends_datatree.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,6 +1,8 @@
 from __future__ import annotations
 
+import contextlib
 import re
+import sys
 from collections.abc import Callable, Generator, Hashable
 from pathlib import Path
 from typing import TYPE_CHECKING, Literal, cast
@@ -9,25 +11,42 @@ import numpy as np
 import pytest
 
 import xarray as xr
-from xarray.backends.api import open_datatree, open_groups
-from xarray.core.datatree import DataTree
+from xarray import DataTree, load_datatree, open_datatree, open_groups
 from xarray.testing import assert_equal, assert_identical
 from xarray.tests import (
     has_zarr_v3,
+    network,
     parametrize_zarr_format,
     requires_dask,
     requires_h5netcdf,
+    requires_h5netcdf_or_netCDF4,
     requires_netCDF4,
+    requires_pydap,
     requires_zarr,
 )
+from xarray.tests.test_backends import TestNetCDF4Data as _TestNetCDF4Data
 
 if TYPE_CHECKING:
-    from xarray.core.datatree_io import T_DataTreeNetcdfEngine
+    from xarray.backends.writers import T_DataTreeNetcdfEngine
 
-try:
+with contextlib.suppress(ImportError):
     import netCDF4 as nc4
-except ImportError:
-    pass
+
+
+ON_WINDOWS = sys.platform == "win32"
+
+
+class TestNetCDF4DataTree(_TestNetCDF4Data):
+    @contextlib.contextmanager
+    def open(self, path, **kwargs):
+        with open_datatree(path, engine=self.engine, **kwargs) as ds:
+            yield ds.to_dataset()
+
+    def test_child_group_with_inconsistent_dimensions(self) -> None:
+        with pytest.raises(
+            ValueError, match=r"group '/child' is not aligned with its parents"
+        ):
+            super().test_child_group_with_inconsistent_dimensions()
 
 
 def diff_chunks(
@@ -184,8 +203,8 @@ def unaligned_datatree_zarr_factory(
     yield _unaligned_datatree_zarr
 
 
-class DatatreeIOBase:
-    engine: T_DataTreeNetcdfEngine | None = None
+class NetCDFIOBase:
+    engine: T_DataTreeNetcdfEngine | None
 
     def test_to_netcdf(self, tmpdir, simple_datatree):
         filepath = tmpdir / "test.nc"
@@ -214,7 +233,7 @@ class DatatreeIOBase:
         ) as roundtrip_dt:
             assert original_dt["test"].dtype == roundtrip_dt["test"].dtype
 
-    def test_to_netcdf_inherited_coords(self, tmpdir):
+    def test_to_netcdf_inherited_coords(self, tmpdir) -> None:
         filepath = tmpdir / "test.nc"
         original_dt = DataTree.from_dict(
             {
@@ -229,13 +248,13 @@ class DatatreeIOBase:
             subtree = cast(DataTree, roundtrip_dt["/sub"])
             assert "x" not in subtree.to_dataset(inherit=False).coords
 
-    def test_netcdf_encoding(self, tmpdir, simple_datatree):
+    def test_netcdf_encoding(self, tmpdir, simple_datatree) -> None:
         filepath = tmpdir / "test.nc"
         original_dt = simple_datatree
 
         # add compression
         comp = dict(zlib=True, complevel=9)
-        enc = {"/set2": {var: comp for var in original_dt["/set2"].dataset.data_vars}}
+        enc = {"/set2": dict.fromkeys(original_dt["/set2"].dataset.data_vars, comp)}
 
         original_dt.to_netcdf(filepath, encoding=enc, engine=self.engine)
         with open_datatree(filepath, engine=self.engine) as roundtrip_dt:
@@ -243,10 +262,10 @@ class DatatreeIOBase:
             assert roundtrip_dt["/set2/a"].encoding["complevel"] == comp["complevel"]
 
             enc["/not/a/group"] = {"foo": "bar"}  # type: ignore[dict-item]
-            with pytest.raises(ValueError, match="unexpected encoding group.*"):
+            with pytest.raises(ValueError, match=r"unexpected encoding group.*"):
                 original_dt.to_netcdf(filepath, encoding=enc, engine=self.engine)
 
-    def test_write_subgroup(self, tmpdir):
+    def test_write_subgroup(self, tmpdir) -> None:
         original_dt = DataTree.from_dict(
             {
                 "/": xr.Dataset(coords={"x": [1, 2, 3]}),
@@ -264,27 +283,52 @@ class DatatreeIOBase:
             assert_equal(original_dt, roundtrip_dt)
             assert_identical(expected_dt, roundtrip_dt)
 
+    @requires_netCDF4
+    def test_no_redundant_dimensions(self, tmpdir) -> None:
+        # regression test for https://github.com/pydata/xarray/issues/10241
+        original_dt = DataTree.from_dict(
+            {
+                "/": xr.Dataset(coords={"x": [1, 2, 3]}),
+                "/child": xr.Dataset({"foo": ("x", [4, 5, 6])}),
+            }
+        )
+        filepath = tmpdir / "test.zarr"
+        original_dt.to_netcdf(filepath, engine=self.engine)
 
-@requires_netCDF4
-class TestNetCDF4DatatreeIO(DatatreeIOBase):
-    engine: T_DataTreeNetcdfEngine | None = "netcdf4"
+        root = nc4.Dataset(str(filepath))
+        child = root.groups["child"]
+        assert list(root.dimensions) == ["x"]
+        assert list(child.dimensions) == []
 
-    def test_open_datatree(self, unaligned_datatree_nc) -> None:
-        """Test if `open_datatree` fails to open a netCDF4 with an unaligned group hierarchy."""
+    @requires_dask
+    def test_compute_false(self, tmpdir, simple_datatree):
+        filepath = tmpdir / "test.nc"
+        original_dt = simple_datatree.chunk()
+        result = original_dt.to_netcdf(filepath, engine=self.engine, compute=False)
 
-        with pytest.raises(
-            ValueError,
-            match=(
-                re.escape(
-                    "group '/Group1/subgroup1' is not aligned with its parents:\nGroup:\n"
-                )
-                + ".*"
-            ),
-        ):
-            open_datatree(unaligned_datatree_nc)
+        if not ON_WINDOWS:
+            # File at filepath is not closed until .compute() is called. On
+            # Windows, this means we can't open it yet.
+            with open_datatree(filepath, engine=self.engine) as in_progress_dt:
+                assert in_progress_dt.isomorphic(original_dt)
+                assert not in_progress_dt.equals(original_dt)
+
+        result.compute()
+        with open_datatree(filepath, engine=self.engine) as written_dt:
+            assert_identical(written_dt, original_dt)
+
+    def test_default_write_engine(self, tmpdir, simple_datatree, monkeypatch):
+        # Ensure the other netCDF library is not importable
+        exclude = "netCDF4" if self.engine == "h5netcdf" else "h5netcdf"
+        monkeypatch.delitem(sys.modules, exclude, raising=False)
+        monkeypatch.setattr(sys, "meta_path", [])
+
+        filepath = tmpdir + "/phony_dims.nc"
+        original_dt = simple_datatree
+        original_dt.to_netcdf(filepath)  # should not raise
 
     @requires_dask
-    def test_open_datatree_chunks(self, tmpdir, simple_datatree) -> None:
+    def test_open_datatree_chunks(self, tmpdir) -> None:
         filepath = tmpdir / "test.nc"
 
         chunks = {"x": 2, "y": 1}
@@ -299,13 +343,82 @@ class TestNetCDF4DatatreeIO(DatatreeIOBa
                 "/group2": set2_data.chunk(chunks),
             }
         )
-        original_tree.to_netcdf(filepath, engine="netcdf4")
+        original_tree.to_netcdf(filepath, engine=self.engine)
 
-        with open_datatree(filepath, engine="netcdf4", chunks=chunks) as tree:
+        with open_datatree(filepath, engine=self.engine, chunks=chunks) as tree:
             xr.testing.assert_identical(tree, original_tree)
 
             assert_chunks_equal(tree, original_tree, enforce_dask=True)
 
+    def test_roundtrip_via_memoryview(self, simple_datatree) -> None:
+        original_dt = simple_datatree
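+        # to_netcdf() with no target serializes to an in-memory buffer
+        # (a memoryview) that load_datatree can read back directly.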
+        memview = original_dt.to_netcdf(engine=self.engine)
+        roundtrip_dt = load_datatree(memview, engine=self.engine)
+        assert_equal(original_dt, roundtrip_dt)
+
+    def test_to_memoryview_compute_false(self, simple_datatree) -> None:
+        original_dt = simple_datatree
+        with pytest.raises(
+            NotImplementedError,
+            match=re.escape("to_netcdf() with compute=False is not yet implemented"),
+        ):
+            original_dt.to_netcdf(engine=self.engine, compute=False)
+
+    def test_open_datatree_specific_group(self, tmpdir, simple_datatree) -> None:
+        """Test opening a specific group within a NetCDF file using `open_datatree`."""
+        filepath = tmpdir / "test.nc"
+        group = "/set1"
+        original_dt = simple_datatree
+        original_dt.to_netcdf(filepath, engine=self.engine)
+        expected_subtree = original_dt[group].copy()
+        expected_subtree.orphan()
+        with open_datatree(filepath, group=group, engine=self.engine) as subgroup_tree:
+            assert subgroup_tree.root.parent is None
+            assert_equal(subgroup_tree, expected_subtree)
+
+
+@requires_h5netcdf_or_netCDF4
+class TestGenericNetCDFIO(NetCDFIOBase):
+    engine: T_DataTreeNetcdfEngine | None = None
+
+    @requires_netCDF4
+    def test_open_netcdf3(self, tmpdir) -> None:
+        filepath = tmpdir / "test.nc"
+        ds = xr.Dataset({"foo": 1})
+        ds.to_netcdf(filepath, format="NETCDF3_CLASSIC")
+
+        expected_dt = DataTree(ds)
+        roundtrip_dt = load_datatree(filepath)  # must use netCDF4 engine
+        assert_equal(expected_dt, roundtrip_dt)
+
+    @requires_h5netcdf
+    @requires_netCDF4
+    def test_memoryview_write_h5netcdf_read_netcdf4(self, simple_datatree) -> None:
+        original_dt = simple_datatree
+        memview = original_dt.to_netcdf(engine="h5netcdf")
+        roundtrip_dt = load_datatree(memview, engine="netcdf4")
+        assert_equal(original_dt, roundtrip_dt)
+
+    @requires_h5netcdf
+    @requires_netCDF4
+    def test_memoryview_write_netcdf4_read_h5netcdf(self, simple_datatree) -> None:
+        original_dt = simple_datatree
+        memview = original_dt.to_netcdf(engine="netcdf4")
+        roundtrip_dt = load_datatree(memview, engine="h5netcdf")
+        assert_equal(original_dt, roundtrip_dt)
+
+    def test_open_datatree_unaligned_hierarchy(self, unaligned_datatree_nc) -> None:
+        with pytest.raises(
+            ValueError,
+            match=(
+                re.escape(
+                    "group '/Group1/subgroup1' is not aligned with its parents:\nGroup:\n"
+                )
+                + ".*"
+            ),
+        ):
+            open_datatree(unaligned_datatree_nc)
+
     def test_open_groups(self, unaligned_datatree_nc) -> None:
         """Test `open_groups` with a netCDF4 file with an unaligned group hierarchy."""
         unaligned_dict_of_datasets = open_groups(unaligned_datatree_nc)
@@ -348,7 +461,7 @@ class TestNetCDF4DatatreeIO(DatatreeIOBa
         )
         original_tree.to_netcdf(filepath, mode="w")
 
-        dict_of_datasets = open_groups(filepath, engine="netcdf4", chunks=chunks)
+        dict_of_datasets = open_groups(filepath, chunks=chunks)
 
         for path, ds in dict_of_datasets.items():
             assert {k: max(vs) for k, vs in ds.chunksizes.items()} == chunks, (
@@ -358,6 +471,11 @@ class TestNetCDF4DatatreeIO(DatatreeIOBa
         for ds in dict_of_datasets.values():
             ds.close()
 
+
+@requires_netCDF4
+class TestNetCDF4DatatreeIO(NetCDFIOBase):
+    engine: T_DataTreeNetcdfEngine | None = "netcdf4"
+
     def test_open_groups_to_dict(self, tmpdir) -> None:
         """Create an aligned netCDF4 with the following structure to test `open_groups`
         and `DataTree.from_dict`.
@@ -405,21 +523,9 @@ class TestNetCDF4DatatreeIO(DatatreeIOBa
         for ds in aligned_dict_of_datasets.values():
             ds.close()
 
-    def test_open_datatree_specific_group(self, tmpdir, simple_datatree) -> None:
-        """Test opening a specific group within a NetCDF file using `open_datatree`."""
-        filepath = tmpdir / "test.nc"
-        group = "/set1"
-        original_dt = simple_datatree
-        original_dt.to_netcdf(filepath)
-        expected_subtree = original_dt[group].copy()
-        expected_subtree.orphan()
-        with open_datatree(filepath, group=group, engine=self.engine) as subgroup_tree:
-            assert subgroup_tree.root.parent is None
-            assert_equal(subgroup_tree, expected_subtree)
-
 
 @requires_h5netcdf
-class TestH5NetCDFDatatreeIO(DatatreeIOBase):
+class TestH5NetCDFDatatreeIO(NetCDFIOBase):
     engine: T_DataTreeNetcdfEngine | None = "h5netcdf"
 
     def test_phony_dims_warning(self, tmpdir) -> None:
@@ -445,13 +551,118 @@ class TestH5NetCDFDatatreeIO(DatatreeIOB
                     "phony_dim_3": 25,
                 }
 
+    def test_roundtrip_using_filelike_object(self, tmpdir, simple_datatree) -> None:
+        original_dt = simple_datatree
+        filepath = tmpdir + "/test.nc"
+        # h5py requires both read and write access when writing; it works
+        # with file-like objects provided they support both and are seekable.
+        with open(filepath, "wb+") as file:
+            original_dt.to_netcdf(file, engine=self.engine)
+        with open(filepath, "rb") as file:
+            with open_datatree(file, engine=self.engine) as roundtrip_dt:
+                assert_equal(original_dt, roundtrip_dt)
+
+
+@network
+@requires_pydap
+class TestPyDAPDatatreeIO:
+    """Test PyDAP backend for DataTree."""
+
+    engine: T_DataTreeNetcdfEngine | None = "pydap"
+    # These URLs can be checked by appending .dmr and replacing dap4 with http
+    unaligned_datatree_url = (
+        "dap4://test.opendap.org/opendap/dap4/unaligned_simple_datatree.nc.h5"
+    )
+    all_aligned_child_nodes_url = (
+        "dap4://test.opendap.org/opendap/dap4/all_aligned_child_nodes.nc.h5"
+    )
+    simplegroup_datatree_url = "dap4://test.opendap.org/opendap/dap4/SimpleGroup.nc4.h5"
+
+    def test_open_datatree_unaligned_hierarchy(
+        self, url=unaligned_datatree_url
+    ) -> None:
+        with pytest.raises(
+            ValueError,
+            match=(
+                re.escape(
+                    "group '/Group1/subgroup1' is not aligned with its parents:\nGroup:\n"
+                )
+                + ".*"
+            ),
+        ):
+            open_datatree(url, engine=self.engine)
+
+    def test_open_groups(self, url=unaligned_datatree_url) -> None:
+        """Test `open_groups` with a netCDF4/HDF5 file with an unaligned group hierarchy."""
+        unaligned_dict_of_datasets = open_groups(url, engine=self.engine)
+
+        # Check that group names are keys in the dictionary of `xr.Datasets`
+        assert "/" in unaligned_dict_of_datasets.keys()
+        assert "/Group1" in unaligned_dict_of_datasets.keys()
+        assert "/Group1/subgroup1" in unaligned_dict_of_datasets.keys()
+        # Check that group name returns the correct datasets
+        with xr.open_dataset(url, engine=self.engine, group="/") as expected:
+            assert_identical(unaligned_dict_of_datasets["/"], expected)
+        with xr.open_dataset(url, group="Group1", engine=self.engine) as expected:
+            assert_identical(unaligned_dict_of_datasets["/Group1"], expected)
+        with xr.open_dataset(
+            url,
+            group="/Group1/subgroup1",
+            engine=self.engine,
+        ) as expected:
+            assert_identical(unaligned_dict_of_datasets["/Group1/subgroup1"], expected)
+
+    def test_inherited_coords(self, url=simplegroup_datatree_url) -> None:
+        """Test that `open_datatree` inherits coordinates from root tree.
+
+        This particular h5 test file has a child dataset that inherits the
+        time coordinate from the root dataset.
+
+        Group: /
+        │   Dimensions:        (time: 1, Z: 1000, nv: 2)
+        │   Coordinates:
+        |       time: (time)    float32 0.5
+        |       Z:    (Z)       float32 -0.0 -1.0 -2.0 ...
+        │   Data variables:
+        │       Pressure  (Z)   float32 ...
+        |       time_bnds (time, nv) float32 ...
+        └── Group: /SimpleGroup
+            │   Dimensions:      (time: 1, Z: 1000, nv: 2, Y: 40, X: 40)
+            │   Coordinates:
+            |      Y:   (Y)     int16 1 2 3 4 ...
+            |      X:   (X)     int16 1 2 3 4 ...
+            |   Inherited coordinates:
+            |      time: (time)    float32 0.5
+            |      Z:    (Z)       float32 -0.0 -1.0 -2.0 ...
+            │   Data variables:
+            │       Temperature  (time, Z, Y, X) float32 ...
+            |       Salinity     (time, Z, Y, X) float32 ...
+        """
+        tree = open_datatree(url, engine=self.engine)
+        assert set(tree.dims) == {"time", "Z", "nv"}
+        assert tree["/SimpleGroup"].coords["time"].dims == ("time",)
+        assert tree["/SimpleGroup"].coords["Z"].dims == ("Z",)
+        assert tree["/SimpleGroup"].coords["Y"].dims == ("Y",)
+        assert tree["/SimpleGroup"].coords["X"].dims == ("X",)
+        with xr.open_dataset(url, engine=self.engine, group="/SimpleGroup") as expected:
+            assert set(tree["/SimpleGroup"].dims) == set(
+                list(expected.dims) + ["Z", "nv"]
+            )
+
+    def test_open_groups_to_dict(self, url=all_aligned_child_nodes_url) -> None:
+        aligned_dict_of_datasets = open_groups(url, engine=self.engine)
+        aligned_dt = DataTree.from_dict(aligned_dict_of_datasets)
+        with open_datatree(url, engine=self.engine) as opened_tree:
+            assert opened_tree.identical(aligned_dt)
+
 
 @requires_zarr
 @parametrize_zarr_format
 class TestZarrDatatreeIO:
     engine = "zarr"
 
-    def test_to_zarr(self, tmpdir, simple_datatree, zarr_format):
+    def test_to_zarr(self, tmpdir, simple_datatree, zarr_format) -> None:
         filepath = str(tmpdir / "test.zarr")
         original_dt = simple_datatree
         original_dt.to_zarr(filepath, zarr_format=zarr_format)
@@ -459,7 +670,10 @@ class TestZarrDatatreeIO:
         with open_datatree(filepath, engine="zarr") as roundtrip_dt:
             assert_equal(original_dt, roundtrip_dt)
 
-    def test_zarr_encoding(self, tmpdir, simple_datatree, zarr_format):
+    @pytest.mark.filterwarnings(
+        "ignore:Numcodecs codecs are not in the Zarr version 3 specification"
+    )
+    def test_zarr_encoding(self, tmpdir, simple_datatree, zarr_format) -> None:
         filepath = str(tmpdir / "test.zarr")
         original_dt = simple_datatree
 
@@ -470,11 +684,12 @@ class TestZarrDatatreeIO:
             comp = {"compressors": (codec,)} if has_zarr_v3 else {"compressor": codec}
         elif zarr_format == 3:
             # specifying codecs in zarr_format=3 requires importing from zarr 3 namespace
-            import numcodecs.zarr3
+            from zarr.registry import get_codec_class
 
-            comp = {"compressors": (numcodecs.zarr3.Blosc(cname="zstd", clevel=3),)}
+            Blosc = get_codec_class("numcodecs.blosc")
+            comp = {"compressors": (Blosc(cname="zstd", clevel=3),)}  # type: ignore[call-arg]
 
-        enc = {"/set2": {var: comp for var in original_dt["/set2"].dataset.data_vars}}
+        enc = {"/set2": dict.fromkeys(original_dt["/set2"].dataset.data_vars, comp)}
         original_dt.to_zarr(filepath, encoding=enc, zarr_format=zarr_format)
 
         with open_datatree(filepath, engine="zarr") as roundtrip_dt:
@@ -484,12 +699,12 @@ class TestZarrDatatreeIO:
             )
 
             enc["/not/a/group"] = {"foo": "bar"}  # type: ignore[dict-item]
-            with pytest.raises(ValueError, match="unexpected encoding group.*"):
-                original_dt.to_zarr(
-                    filepath, encoding=enc, engine="zarr", zarr_format=zarr_format
-                )
+            with pytest.raises(ValueError, match=r"unexpected encoding group.*"):
+                original_dt.to_zarr(filepath, encoding=enc, zarr_format=zarr_format)
 
-    def test_to_zarr_zip_store(self, tmpdir, simple_datatree, zarr_format):
+    @pytest.mark.xfail(reason="upstream zarr read-only changes have broken this test")
+    @pytest.mark.filterwarnings("ignore:Duplicate name")
+    def test_to_zarr_zip_store(self, tmpdir, simple_datatree, zarr_format) -> None:
         from zarr.storage import ZipStore
 
         filepath = str(tmpdir / "test.zarr.zip")
@@ -500,7 +715,9 @@ class TestZarrDatatreeIO:
         with open_datatree(store, engine="zarr") as roundtrip_dt:  # type: ignore[arg-type, unused-ignore]
             assert_equal(original_dt, roundtrip_dt)
 
-    def test_to_zarr_not_consolidated(self, tmpdir, simple_datatree, zarr_format):
+    def test_to_zarr_not_consolidated(
+        self, tmpdir, simple_datatree, zarr_format
+    ) -> None:
         filepath = tmpdir / "test.zarr"
         zmetadata = filepath / ".zmetadata"
         s1zmetadata = filepath / "set1" / ".zmetadata"
@@ -514,7 +731,9 @@ class TestZarrDatatreeIO:
             with open_datatree(filepath, engine="zarr") as roundtrip_dt:
                 assert_equal(original_dt, roundtrip_dt)
 
-    def test_to_zarr_default_write_mode(self, tmpdir, simple_datatree, zarr_format):
+    def test_to_zarr_default_write_mode(
+        self, tmpdir, simple_datatree, zarr_format
+    ) -> None:
         simple_datatree.to_zarr(str(tmpdir), zarr_format=zarr_format)
 
         import zarr
@@ -531,12 +750,14 @@ class TestZarrDatatreeIO:
     @requires_dask
     def test_to_zarr_compute_false(
         self, tmp_path: Path, simple_datatree: DataTree, zarr_format: Literal[2, 3]
-    ):
+    ) -> None:
         import dask.array as da
 
         storepath = tmp_path / "test.zarr"
         original_dt = simple_datatree.chunk()
-        original_dt.to_zarr(str(storepath), compute=False, zarr_format=zarr_format)
+        result = original_dt.to_zarr(
+            str(storepath), compute=False, zarr_format=zarr_format
+        )
 
         def assert_expected_zarr_files_exist(
             arr_dir: Path,
@@ -590,7 +811,7 @@ class TestZarrDatatreeIO:
             # inherited variables aren't meant to be written to zarr
             local_node_variables = node.to_dataset(inherit=False).variables
             for name, var in local_node_variables.items():
-                var_dir = storepath / node.path.removeprefix("/") / name
+                var_dir = storepath / node.path.removeprefix("/") / name  # type: ignore[operator]
 
                 assert_expected_zarr_files_exist(
                     arr_dir=var_dir,
@@ -607,6 +828,49 @@ class TestZarrDatatreeIO:
                     zarr_format=zarr_format,
                 )
 
+        in_progress_dt = load_datatree(str(storepath), engine="zarr")
+        assert not in_progress_dt.equals(original_dt)
+
+        result.compute()
+        written_dt = load_datatree(str(storepath), engine="zarr")
+        assert_identical(written_dt, original_dt)
+
+    @requires_dask
+    def test_rplus_mode(
+        self, tmp_path: Path, simple_datatree: DataTree, zarr_format: Literal[2, 3]
+    ) -> None:
+        storepath = tmp_path / "test.zarr"
+        original_dt = simple_datatree.chunk()
+        original_dt.to_zarr(storepath, compute=False, zarr_format=zarr_format)
+        original_dt.to_zarr(storepath, mode="r+")
+        with open_datatree(str(storepath), engine="zarr") as written_dt:
+            assert_identical(written_dt, original_dt)
+
+    @requires_dask
+    def test_to_zarr_no_redundant_computation(self, tmpdir, zarr_format) -> None:
+        import dask.array as da
+
+        eval_count = 0
+
+        def expensive_func(x):
+            nonlocal eval_count
+            eval_count += 1
+            return x + 1
+
+        base = da.random.random((), chunks=())
+        derived1 = da.map_blocks(expensive_func, base, meta=np.array((), np.float64))
+        derived2 = derived1 + 1  # depends on derived1
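+        # Both groups share the dask graph for derived1, so a single to_zarr
+        # call should evaluate expensive_func only once.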
+        tree = DataTree.from_dict(
+            {
+                "group1": xr.Dataset({"derived": derived1}),
+                "group2": xr.Dataset({"derived": derived2}),
+            }
+        )
+
+        filepath = str(tmpdir / "test.zarr")
+        tree.to_zarr(filepath, zarr_format=zarr_format)
+        assert eval_count == 1  # not 2
+
     def test_to_zarr_inherited_coords(self, tmpdir, zarr_format):
         original_dt = DataTree.from_dict(
             {
@@ -640,8 +904,9 @@ class TestZarrDatatreeIO:
     @pytest.mark.filterwarnings(
         "ignore:Failed to open Zarr store with consolidated metadata:RuntimeWarning"
     )
-    def test_open_datatree(self, unaligned_datatree_zarr_factory, zarr_format) -> None:
-        """Test if `open_datatree` fails to open a zarr store with an unaligned group hierarchy."""
+    def test_open_datatree_unaligned_hierarchy(
+        self, unaligned_datatree_zarr_factory, zarr_format
+    ) -> None:
         storepath = unaligned_datatree_zarr_factory(zarr_format=zarr_format)
 
         with pytest.raises(
@@ -759,7 +1024,7 @@ class TestZarrDatatreeIO:
         for ds in dict_of_datasets.values():
             ds.close()
 
-    def test_write_subgroup(self, tmpdir, zarr_format):
+    def test_write_subgroup(self, tmpdir, zarr_format) -> None:
         original_dt = DataTree.from_dict(
             {
                 "/": xr.Dataset(coords={"x": [1, 2, 3]}),
@@ -780,7 +1045,7 @@ class TestZarrDatatreeIO:
     @pytest.mark.filterwarnings(
         "ignore:Failed to open Zarr store with consolidated metadata:RuntimeWarning"
     )
-    def test_write_inherited_coords_false(self, tmpdir, zarr_format):
+    def test_write_inherited_coords_false(self, tmpdir, zarr_format) -> None:
         original_dt = DataTree.from_dict(
             {
                 "/": xr.Dataset(coords={"x": [1, 2, 3]}),
@@ -804,7 +1069,7 @@ class TestZarrDatatreeIO:
     @pytest.mark.filterwarnings(
         "ignore:Failed to open Zarr store with consolidated metadata:RuntimeWarning"
     )
-    def test_write_inherited_coords_true(self, tmpdir, zarr_format):
+    def test_write_inherited_coords_true(self, tmpdir, zarr_format) -> None:
         original_dt = DataTree.from_dict(
             {
                 "/": xr.Dataset(coords={"x": [1, 2, 3]}),
@@ -824,3 +1089,27 @@ class TestZarrDatatreeIO:
         expected_child.name = None
         with open_datatree(filepath, group="child", engine="zarr") as roundtrip_child:
             assert_identical(expected_child, roundtrip_child)
+
+    @pytest.mark.xfail(
+        ON_WINDOWS,
+        reason="Permission errors from Zarr: https://github.com/pydata/xarray/pull/10793",
+    )
+    @pytest.mark.filterwarnings(
+        "ignore:Failed to open Zarr store with consolidated metadata:RuntimeWarning"
+    )
+    def test_zarr_engine_recognised(self, tmpdir, zarr_format) -> None:
+        """Test that xarray can guess the zarr backend when the engine is not specified"""
+        original_dt = DataTree.from_dict(
+            {
+                "/": xr.Dataset(coords={"x": [1, 2, 3]}),
+                "/child": xr.Dataset({"foo": ("x", [4, 5, 6])}),
+            }
+        )
+
+        filepath = str(tmpdir / "test.zarr")
+        original_dt.to_zarr(
+            filepath, write_inherited_coords=True, zarr_format=zarr_format
+        )
+
+        with open_datatree(filepath) as roundtrip_dt:
+            assert_identical(original_dt, roundtrip_dt)
diff -pruN 2025.03.1-8/xarray/tests/test_backends_file_manager.py 2025.10.1-1/xarray/tests/test_backends_file_manager.py
--- 2025.03.1-8/xarray/tests/test_backends_file_manager.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_backends_file_manager.py	2025-10-10 10:38:05.000000000 +0000
@@ -7,7 +7,7 @@ from unittest import mock
 
 import pytest
 
-from xarray.backends.file_manager import CachingFileManager
+from xarray.backends.file_manager import CachingFileManager, PickleableFileManager
 from xarray.backends.lru_cache import LRUCache
 from xarray.core.options import set_options
 from xarray.tests import assert_no_warnings
@@ -262,3 +262,30 @@ def test_file_manager_acquire_context(tm
     assert file_cache  # file *was* already open
 
     manager.close()
+
+
+def test_pickleable_file_manager_write_pickle(tmpdir) -> None:
+    path = str(tmpdir.join("testing.txt"))
+    manager = PickleableFileManager(open, path, mode="w")
+    f = manager.acquire()
+    f.write("foo")
+    f.flush()
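+    # Round-tripping the manager through pickle should yield an independent
+    # handle that keeps appending to the same file.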
+    manager2 = pickle.loads(pickle.dumps(manager))
+    f2 = manager2.acquire()
+    f2.write("bar")
+    manager2.close()
+    manager.close()
+
+    with open(path) as f:
+        assert f.read() == "foobar"
+
+
+def test_pickleable_file_manager_preserves_closed(tmpdir) -> None:
+    path = str(tmpdir.join("testing.txt"))
+    manager = PickleableFileManager(open, path, mode="w")
+    f = manager.acquire()
+    f.write("foo")
+    manager.close()
+    manager2 = pickle.loads(pickle.dumps(manager))
+    assert manager2._closed
+    assert repr(manager2) == "<closed PickleableFileManager>"
diff -pruN 2025.03.1-8/xarray/tests/test_calendar_ops.py 2025.10.1-1/xarray/tests/test_calendar_ops.py
--- 2025.03.1-8/xarray/tests/test_calendar_ops.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_calendar_ops.py	2025-10-10 10:38:05.000000000 +0000
@@ -4,7 +4,7 @@ import numpy as np
 import pandas as pd
 import pytest
 
-from xarray import CFTimeIndex, DataArray, infer_freq
+from xarray import CFTimeIndex, DataArray, Dataset, infer_freq
 from xarray.coding.calendar_ops import convert_calendar, interp_calendar
 from xarray.coding.cftime_offsets import date_range
 from xarray.testing import assert_identical
@@ -63,6 +63,24 @@ def test_convert_calendar(source, target
     np.testing.assert_array_equal(conv.time, expected_times)
 
 
+def test_convert_calendar_dataset():
+    # Check that variables without a time dimension are not modified
+    src = DataArray(
+        date_range("2004-01-01", "2004-12-31", freq="D", calendar="standard"),
+        dims=("time",),
+        name="time",
+    )
+    da_src = DataArray(
+        np.linspace(0, 1, src.size), dims=("time",), coords={"time": src}
+    ).expand_dims(lat=[0, 1])
+    ds_src = Dataset({"hastime": da_src, "notime": (("lat",), [0, 1])})
+
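+    # Only variables indexed by "time" should be converted; "notime" must
+    # come through identical.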
+    conv = convert_calendar(ds_src, "360_day", align_on="date")
+
+    assert conv.time.dt.calendar == "360_day"
+    assert_identical(ds_src.notime, conv.notime)
+
+
 @pytest.mark.parametrize(
     "source,target,freq",
     [
@@ -217,10 +235,24 @@ def test_convert_calendar_errors():
 
     # Datetime objects
     da = DataArray([0, 1, 2], dims=("x",), name="x")
-    with pytest.raises(ValueError, match="Coordinate x must contain datetime objects."):
+    with pytest.raises(
+        ValueError, match=r"Coordinate x must contain datetime objects."
+    ):
         convert_calendar(da, "standard", dim="x")
 
 
+def test_convert_calendar_dimension_name():
+    src = DataArray(
+        date_range("2004-01-01", "2004-01-31", freq="D", calendar="noleap"),
+        dims=("date",),
+        name="date",
+    )
+
+    out = convert_calendar(src, "proleptic_gregorian", dim="date")
+
+    np.testing.assert_array_equal(src, out)
+
+
 def test_convert_calendar_same_calendar():
     src = DataArray(
         date_range("2000-01-01", periods=12, freq="6h", use_cftime=False),
@@ -284,7 +316,7 @@ def test_interp_calendar_errors():
     da2 = da1 + 1
 
     with pytest.raises(
-        ValueError, match="Both 'source.x' and 'target' must contain datetime objects."
+        ValueError, match=r"Both 'source.x' and 'target' must contain datetime objects."
     ):
         interp_calendar(da1, da2, dim="x")
 
diff -pruN 2025.03.1-8/xarray/tests/test_cftime_offsets.py 2025.10.1-1/xarray/tests/test_cftime_offsets.py
--- 2025.03.1-8/xarray/tests/test_cftime_offsets.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_cftime_offsets.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 import warnings
-from itertools import product
+from itertools import product, starmap
 from typing import TYPE_CHECKING, Literal
 
 import numpy as np
@@ -274,7 +274,7 @@ def test_to_offset_annual(month_label, m
     freq = offset_str
     offset_type = _ANNUAL_OFFSET_TYPES[offset_str]
     if month_label:
-        freq = "-".join([freq, month_label])
+        freq = f"{freq}-{month_label}"
     if multiple:
         freq = f"{multiple}{freq}"
     result = to_offset(freq)
@@ -303,7 +303,7 @@ def test_to_offset_quarter(month_label,
     freq = offset_str
     offset_type = _QUARTER_OFFSET_TYPES[offset_str]
     if month_label:
-        freq = "-".join([freq, month_label])
+        freq = f"{freq}-{month_label}"
     if multiple:
         freq = f"{multiple}{freq}"
     result = to_offset(freq)
@@ -313,18 +313,16 @@ def test_to_offset_quarter(month_label,
     elif multiple:
         if month_int:
             expected = offset_type(n=multiple)
-        else:
-            if offset_type == QuarterBegin:
-                expected = offset_type(n=multiple, month=1)
-            elif offset_type == QuarterEnd:
-                expected = offset_type(n=multiple, month=12)
+        elif offset_type == QuarterBegin:
+            expected = offset_type(n=multiple, month=1)
+        elif offset_type == QuarterEnd:
+            expected = offset_type(n=multiple, month=12)
     elif month_int:
         expected = offset_type(month=month_int)
-    else:
-        if offset_type == QuarterBegin:
-            expected = offset_type(month=1)
-        elif offset_type == QuarterEnd:
-            expected = offset_type(month=12)
+    elif offset_type == QuarterBegin:
+        expected = offset_type(month=1)
+    elif offset_type == QuarterEnd:
+        expected = offset_type(month=12)
     assert result == expected
 
 
@@ -447,7 +445,6 @@ _MUL_TESTS = [
     (Second(), 3, Second(n=3)),
     (Millisecond(), 3, Millisecond(n=3)),
     (Microsecond(), 3, Microsecond(n=3)),
-    (Day(), 0.5, Hour(n=12)),
     (Hour(), 0.5, Minute(n=30)),
     (Hour(), -0.5, Minute(n=-30)),
     (Minute(), 0.5, Second(n=30)),
@@ -474,7 +471,15 @@ def test_mul_float_multiple_next_higher_
 
 @pytest.mark.parametrize(
     "offset",
-    [YearBegin(), YearEnd(), QuarterBegin(), QuarterEnd(), MonthBegin(), MonthEnd()],
+    [
+        YearBegin(),
+        YearEnd(),
+        QuarterBegin(),
+        QuarterEnd(),
+        MonthBegin(),
+        MonthEnd(),
+        Day(),
+    ],
     ids=_id_func,
 )
 def test_nonTick_offset_multiplied_float_error(offset):
@@ -536,6 +541,20 @@ def test_add_sub_monthly(offset, expecte
     assert result == expected
 
 
+def test_add_daily_offsets() -> None:
+    offset = Day(n=2)
+    expected = Day(n=4)
+    result = offset + offset
+    assert result == expected
+
+
+def test_subtract_daily_offsets() -> None:
+    offset = Day(n=2)
+    expected = Day(n=0)
+    result = offset - offset
+    assert result == expected
+
+
 @pytest.mark.parametrize(("offset", "expected_date_args"), _ADD_TESTS, ids=_id_func)
 def test_radd_sub_monthly(offset, expected_date_args, calendar):
     date_type = get_date_type(calendar)
@@ -1222,7 +1241,7 @@ def test_cftime_range(
     start, end, periods, freq, inclusive, normalize, calendar, expected_date_args
 ):
     date_type = get_date_type(calendar)
-    expected_dates = [date_type(*args) for args in expected_date_args]
+    expected_dates = list(starmap(date_type, expected_date_args))
 
     if isinstance(start, tuple):
         start = date_type(*start)
@@ -1279,7 +1298,7 @@ def test_invalid_date_range_cftime_input
     end: str | None,
     periods: int | None,
     freq: str | None,
-    inclusive: Literal["up", None],
+    inclusive: Literal["up"] | None,
 ) -> None:
     with pytest.raises(ValueError):
         date_range(start, end, periods, freq, inclusive=inclusive, use_cftime=True)  # type: ignore[arg-type]
@@ -1490,7 +1509,7 @@ def test_date_range_like_errors():
 
     with pytest.raises(
         ValueError,
-        match="`date_range_like` was unable to generate a range as the source frequency was not inferable.",
+        match=r"`date_range_like` was unable to generate a range as the source frequency was not inferable.",
     ):
         date_range_like(src, "gregorian")
 
@@ -1503,14 +1522,14 @@ def test_date_range_like_errors():
     )
     with pytest.raises(
         ValueError,
-        match="'source' must be a 1D array of datetime objects for inferring its range.",
+        match=r"'source' must be a 1D array of datetime objects for inferring its range.",
     ):
         date_range_like(src, "noleap")
 
     da = DataArray([1, 2, 3, 4], dims=("time",))
     with pytest.raises(
         ValueError,
-        match="'source' must be a 1D array of datetime objects for inferring its range.",
+        match=r"'source' must be a 1D array of datetime objects for inferring its range.",
     ):
         date_range_like(da, "noleap")
 
diff -pruN 2025.03.1-8/xarray/tests/test_cftimeindex.py 2025.10.1-1/xarray/tests/test_cftimeindex.py
--- 2025.03.1-8/xarray/tests/test_cftimeindex.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_cftimeindex.py	2025-10-10 10:38:05.000000000 +0000
@@ -830,7 +830,6 @@ def test_cftimeindex_add_timedeltaindex(
 @pytest.mark.parametrize(
     "freq,units",
     [
-        ("D", "D"),
         ("h", "h"),
         ("min", "min"),
         ("s", "s"),
@@ -856,7 +855,7 @@ def test_cftimeindex_shift_float_us() ->
 
 
 @requires_cftime
-@pytest.mark.parametrize("freq", ["YS", "YE", "QS", "QE", "MS", "ME"])
+@pytest.mark.parametrize("freq", ["YS", "YE", "QS", "QE", "MS", "ME", "D"])
 def test_cftimeindex_shift_float_fails_for_non_tick_freqs(freq) -> None:
     a = xr.date_range("2000", periods=3, freq="D", use_cftime=True)
     with pytest.raises(TypeError, match="unsupported operand type"):
@@ -1403,17 +1402,17 @@ def test_asi8_empty_cftimeindex():
 
 @requires_cftime
 def test_infer_freq_valid_types(time_unit: PDDatetimeUnitOptions) -> None:
-    cf_indx = xr.date_range("2000-01-01", periods=3, freq="D", use_cftime=True)
-    assert xr.infer_freq(cf_indx) == "D"
-    assert xr.infer_freq(xr.DataArray(cf_indx)) == "D"
-
-    pd_indx = pd.date_range("2000-01-01", periods=3, freq="D").as_unit(time_unit)
-    assert xr.infer_freq(pd_indx) == "D"
-    assert xr.infer_freq(xr.DataArray(pd_indx)) == "D"
-
-    pd_td_indx = pd.timedelta_range(start="1D", periods=3, freq="D").as_unit(time_unit)
-    assert xr.infer_freq(pd_td_indx) == "D"
-    assert xr.infer_freq(xr.DataArray(pd_td_indx)) == "D"
+    cf_index = xr.date_range("2000-01-01", periods=3, freq="D", use_cftime=True)
+    assert xr.infer_freq(cf_index) == "D"
+    assert xr.infer_freq(xr.DataArray(cf_index)) == "D"
+
+    pd_index = pd.date_range("2000-01-01", periods=3, freq="D").as_unit(time_unit)
+    assert xr.infer_freq(pd_index) == "D"
+    assert xr.infer_freq(xr.DataArray(pd_index)) == "D"
+
+    pd_td_index = pd.timedelta_range(start="1D", periods=3, freq="D").as_unit(time_unit)
+    assert xr.infer_freq(pd_td_index) == "D"
+    assert xr.infer_freq(xr.DataArray(pd_td_index)) == "D"
 
 
 @requires_cftime
@@ -1422,27 +1421,27 @@ def test_infer_freq_invalid_inputs():
     with pytest.raises(ValueError, match="must contain datetime-like objects"):
         xr.infer_freq(xr.DataArray([0, 1, 2]))
 
-    indx = xr.date_range("1990-02-03", periods=4, freq="MS", use_cftime=True)
+    index = xr.date_range("1990-02-03", periods=4, freq="MS", use_cftime=True)
     # 2D DataArray
     with pytest.raises(ValueError, match="must be 1D"):
-        xr.infer_freq(xr.DataArray([indx, indx]))
+        xr.infer_freq(xr.DataArray([index, index]))
 
     # CFTimeIndex too short
     with pytest.raises(ValueError, match="Need at least 3 dates to infer frequency"):
-        xr.infer_freq(indx[:2])
+        xr.infer_freq(index[:2])
 
     # Non-monotonic input
-    assert xr.infer_freq(indx[np.array([0, 2, 1, 3])]) is None
+    assert xr.infer_freq(index[np.array([0, 2, 1, 3])]) is None
 
     # Non-unique input
-    assert xr.infer_freq(indx[np.array([0, 1, 1, 2])]) is None
+    assert xr.infer_freq(index[np.array([0, 1, 1, 2])]) is None
 
     # No unique frequency (here 1st step is MS, second is 2MS)
-    assert xr.infer_freq(indx[np.array([0, 1, 3])]) is None
+    assert xr.infer_freq(index[np.array([0, 1, 3])]) is None
 
     # Same, but for QS
-    indx = xr.date_range("1990-02-03", periods=4, freq="QS", use_cftime=True)
-    assert xr.infer_freq(indx[np.array([0, 1, 3])]) is None
+    index = xr.date_range("1990-02-03", periods=4, freq="QS", use_cftime=True)
+    assert xr.infer_freq(index[np.array([0, 1, 3])]) is None
 
 
 @requires_cftime
@@ -1466,10 +1465,10 @@ def test_infer_freq_invalid_inputs():
 )
 @pytest.mark.parametrize("calendar", _CFTIME_CALENDARS)
 def test_infer_freq(freq, calendar):
-    indx = xr.date_range(
+    index = xr.date_range(
         "2000-01-01", periods=3, freq=freq, calendar=calendar, use_cftime=True
     )
-    out = xr.infer_freq(indx)
+    out = xr.infer_freq(index)
     assert out == freq
 
 
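The renamed infer-frequency tests exercise the public xr.infer_freq API on cftime indexes; a short sketch of the two main cases, assuming cftime is installed (variable names are illustrative):

    import numpy as np
    import xarray as xr

    index = xr.date_range("2000-01-01", periods=4, freq="D", use_cftime=True)
    assert xr.infer_freq(index) == "D"
    # A non-monotonic selection has no single inferable frequency.
    assert xr.infer_freq(index[np.array([0, 2, 1, 3])]) is None
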
diff -pruN 2025.03.1-8/xarray/tests/test_cftimeindex_resample.py 2025.10.1-1/xarray/tests/test_cftimeindex_resample.py
--- 2025.03.1-8/xarray/tests/test_cftimeindex_resample.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_cftimeindex_resample.py	2025-10-10 10:38:05.000000000 +0000
@@ -6,12 +6,17 @@ from typing import TypedDict
 import numpy as np
 import pandas as pd
 import pytest
-from packaging.version import Version
 
 import xarray as xr
-from xarray.coding.cftime_offsets import _new_to_legacy_freq
+from xarray.coding.cftime_offsets import (
+    CFTIME_TICKS,
+    Day,
+    _new_to_legacy_freq,
+    to_offset,
+)
 from xarray.coding.cftimeindex import CFTimeIndex
 from xarray.core.resample_cftime import CFTimeGrouper
+from xarray.tests import has_pandas_3
 
 cftime = pytest.importorskip("cftime")
 
@@ -54,6 +59,20 @@ FREQS = [
 ]
 
 
+def has_tick_resample_freq(freqs):
+    resample_freq, _ = freqs
+    resample_freq_as_offset = to_offset(resample_freq)
+    return isinstance(resample_freq_as_offset, CFTIME_TICKS)
+
+
+def has_non_tick_resample_freq(freqs):
+    return not has_tick_resample_freq(freqs)
+
+
+FREQS_WITH_TICK_RESAMPLE_FREQ = list(filter(has_tick_resample_freq, FREQS))
+FREQS_WITH_NON_TICK_RESAMPLE_FREQ = list(filter(has_non_tick_resample_freq, FREQS))
+
+
 def compare_against_pandas(
     da_datetimeindex,
     da_cftimeindex,
@@ -110,22 +129,14 @@ def da(index) -> xr.DataArray:
     )
 
 
-@pytest.mark.parametrize("freqs", FREQS, ids=lambda x: "{}->{}".format(*x))
+@pytest.mark.parametrize(
+    "freqs", FREQS_WITH_TICK_RESAMPLE_FREQ, ids=lambda x: "{}->{}".format(*x)
+)
 @pytest.mark.parametrize("closed", [None, "left", "right"])
 @pytest.mark.parametrize("label", [None, "left", "right"])
 @pytest.mark.parametrize("offset", [None, "5s"], ids=lambda x: f"{x}")
-def test_resample(freqs, closed, label, offset) -> None:
+def test_resample_with_tick_resample_freq(freqs, closed, label, offset) -> None:
     initial_freq, resample_freq = freqs
-    if (
-        resample_freq == "4001D"
-        and closed == "right"
-        and Version(pd.__version__) < Version("2.2")
-    ):
-        pytest.skip(
-            "Pandas fixed a bug in this test case in version 2.2, which we "
-            "ported to xarray, so this test no longer produces the same "
-            "result as pandas for earlier pandas versions."
-        )
     start = "2000-01-01T12:07:01"
     origin = "start"
 
@@ -150,6 +161,43 @@ def test_resample(freqs, closed, label,
 
 
 @pytest.mark.parametrize(
+    "freqs", FREQS_WITH_NON_TICK_RESAMPLE_FREQ, ids=lambda x: "{}->{}".format(*x)
+)
+@pytest.mark.parametrize("closed", [None, "left", "right"])
+@pytest.mark.parametrize("label", [None, "left", "right"])
+def test_resample_with_non_tick_resample_freq(freqs, closed, label) -> None:
+    initial_freq, resample_freq = freqs
+    resample_freq_as_offset = to_offset(resample_freq)
+    if isinstance(resample_freq_as_offset, Day) and not has_pandas_3:
+        pytest.skip("Only valid for pandas >= 3.0")
+    start = "2000-01-01T12:07:01"
+
+    # Set offset and origin to their default values since they have no effect
+    # on resampling data with a non-tick resample frequency.
+    offset = None
+    origin = "start_day"
+
+    datetime_index = pd.date_range(
+        start=start, periods=5, freq=_new_to_legacy_freq(initial_freq)
+    )
+    cftime_index = xr.date_range(
+        start=start, periods=5, freq=initial_freq, use_cftime=True
+    )
+    da_datetimeindex = da(datetime_index)
+    da_cftimeindex = da(cftime_index)
+
+    compare_against_pandas(
+        da_datetimeindex,
+        da_cftimeindex,
+        resample_freq,
+        closed=closed,
+        label=label,
+        offset=offset,
+        origin=origin,
+    )
+
+
+@pytest.mark.parametrize(
     ("freq", "expected"),
     [
         ("s", "left"),
@@ -228,7 +276,7 @@ def test_invalid_offset_error(offset: st
     cftime_index = xr.date_range("2000", periods=5, use_cftime=True)
     da_cftime = da(cftime_index)
     with pytest.raises(ValueError, match="offset must be"):
-        da_cftime.resample(time="2D", offset=offset)  # type: ignore[arg-type]
+        da_cftime.resample(time="2h", offset=offset)  # type: ignore[arg-type]
 
 
 def test_timedelta_offset() -> None:
@@ -238,6 +286,15 @@ def test_timedelta_offset() -> None:
     cftime_index = xr.date_range("2000", periods=5, use_cftime=True)
     da_cftime = da(cftime_index)
 
-    timedelta_result = da_cftime.resample(time="2D", offset=timedelta).mean()
-    string_result = da_cftime.resample(time="2D", offset=string).mean()
+    timedelta_result = da_cftime.resample(time="2h", offset=timedelta).mean()
+    string_result = da_cftime.resample(time="2h", offset=string).mean()
     xr.testing.assert_identical(timedelta_result, string_result)
+
+
+@pytest.mark.parametrize(("option", "value"), [("offset", "5s"), ("origin", "start")])
+def test_non_tick_option_warning(option, value) -> None:
+    cftime_index = xr.date_range("2000", periods=5, use_cftime=True)
+    da_cftime = da(cftime_index)
+    kwargs = {option: value}
+    with pytest.warns(RuntimeWarning, match=option):
+        da_cftime.resample(time="ME", **kwargs)
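
The tick versus non-tick split above determines which resample options still apply; a brief sketch, assuming this release, where offset is honoured for a tick frequency and omitted for a non-tick frequency such as "ME" (passing offset or origin there now emits a RuntimeWarning because they have no effect):

    import xarray as xr

    times = xr.date_range("2000-01-01", periods=6, freq="D", use_cftime=True)
    da = xr.DataArray(range(6), [("time", times)])

    # Tick resample frequency: offset shifts the bin edges.
    two_day = da.resample(time="48h", offset="5s").mean()
    # Non-tick resample frequency: leave offset/origin at their defaults.
    monthly = da.resample(time="ME").mean()
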
diff -pruN 2025.03.1-8/xarray/tests/test_coarsen.py 2025.10.1-1/xarray/tests/test_coarsen.py
--- 2025.03.1-8/xarray/tests/test_coarsen.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_coarsen.py	2025-10-10 10:38:05.000000000 +0000
@@ -63,14 +63,14 @@ def test_coarsen_coords(ds, dask):
         dims="time",
         coords={"time": pd.date_range("1999-12-15", periods=364)},
     )
-    actual = da.coarsen(time=2).mean()
+    actual = da.coarsen(time=2).mean()  # type: ignore[attr-defined]
 
 
 @requires_cftime
 def test_coarsen_coords_cftime():
     times = xr.date_range("2000", periods=6, use_cftime=True)
     da = xr.DataArray(range(6), [("time", times)])
-    actual = da.coarsen(time=3).mean()
+    actual = da.coarsen(time=3).mean()  # type: ignore[attr-defined]
     expected_times = xr.date_range("2000-01-02", freq="3D", periods=2, use_cftime=True)
     np.testing.assert_array_equal(actual.time, expected_times)
 
@@ -345,5 +345,5 @@ class TestCoarsenConstruct:
         assert list(da.coords) == list(result.coords)
 
         ds = da.to_dataset(name="T")
-        result = ds.coarsen(time=12).construct(time=("year", "month"))
-        assert list(da.coords) == list(result.coords)
+        ds_result = ds.coarsen(time=12).construct(time=("year", "month"))
+        assert list(da.coords) == list(ds_result.coords)
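
For reference, the cftime coarsen behaviour annotated above amounts to the following, assuming cftime is installed:

    import numpy as np
    import xarray as xr

    times = xr.date_range("2000", periods=6, use_cftime=True)
    da = xr.DataArray(range(6), [("time", times)])
    coarse = da.coarsen(time=3).mean()
    # Each window of three daily timestamps averages to its centre date.
    expected_times = xr.date_range("2000-01-02", freq="3D", periods=2, use_cftime=True)
    np.testing.assert_array_equal(coarse.time, expected_times)
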
diff -pruN 2025.03.1-8/xarray/tests/test_coding.py 2025.10.1-1/xarray/tests/test_coding.py
--- 2025.03.1-8/xarray/tests/test_coding.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_coding.py	2025-10-10 10:38:05.000000000 +0000
@@ -94,8 +94,8 @@ def test_coder_roundtrip() -> None:
     assert_identical(original, roundtripped)
 
 
-@pytest.mark.parametrize("dtype", "u1 u2 i1 i2 f2 f4".split())
-@pytest.mark.parametrize("dtype2", "f4 f8".split())
+@pytest.mark.parametrize("dtype", ["u1", "u2", "i1", "i2", "f2", "f4"])
+@pytest.mark.parametrize("dtype2", ["f4", "f8"])
 def test_scaling_converts_to_float(dtype: str, dtype2: str) -> None:
     dt = np.dtype(dtype2)
     original = xr.Variable(
diff -pruN 2025.03.1-8/xarray/tests/test_coding_strings.py 2025.10.1-1/xarray/tests/test_coding_strings.py
--- 2025.03.1-8/xarray/tests/test_coding_strings.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_coding_strings.py	2025-10-10 10:38:05.000000000 +0000
@@ -139,6 +139,45 @@ def test_CharacterArrayCoder_char_dim_na
     assert roundtripped.dims[-1] == original.dims[-1]
 
 
+@pytest.mark.parametrize(
+    [
+        "original",
+        "expected_char_dim_name",
+        "expected_char_dim_length",
+        "warning_message",
+    ],
+    [
+        (
+            Variable(("x",), [b"ab", b"cde"], encoding={"char_dim_name": "foo4"}),
+            "foo3",
+            3,
+            "String dimension naming mismatch",
+        ),
+        (
+            Variable(
+                ("x",),
+                [b"ab", b"cde"],
+                encoding={"original_shape": (2, 4), "char_dim_name": "foo"},
+            ),
+            "foo3",
+            3,
+            "String dimension length mismatch",
+        ),
+    ],
+)
+def test_CharacterArrayCoder_dim_mismatch_warnings(
+    original, expected_char_dim_name, expected_char_dim_length, warning_message
+) -> None:
+    coder = strings.CharacterArrayCoder()
+    with pytest.warns(UserWarning, match=warning_message):
+        encoded = coder.encode(original)
+    roundtripped = coder.decode(encoded)
+    assert encoded.dims[-1] == expected_char_dim_name
+    assert encoded.sizes[expected_char_dim_name] == expected_char_dim_length
+    assert roundtripped.encoding["char_dim_name"] == expected_char_dim_name
+    assert roundtripped.dims[-1] == original.dims[-1]
+
+
 def test_StackedBytesArray() -> None:
     array = np.array([[b"a", b"b", b"c"], [b"d", b"e", b"f"]], dtype="S")
     actual = strings.StackedBytesArray(array)
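
A minimal sketch of the new mismatch warning exercised above, assuming this release's xarray.coding.strings internals: a stored char_dim_name that disagrees with the actual string width is corrected during encoding and a UserWarning is emitted.

    import pytest
    from xarray import Variable
    from xarray.coding import strings

    original = Variable(("x",), [b"ab", b"cde"], encoding={"char_dim_name": "foo4"})
    coder = strings.CharacterArrayCoder()
    with pytest.warns(UserWarning, match="String dimension naming mismatch"):
        encoded = coder.encode(original)
    # The character dimension is renamed to match the real width of 3.
    assert encoded.dims[-1] == "foo3"
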
diff -pruN 2025.03.1-8/xarray/tests/test_coding_times.py 2025.10.1-1/xarray/tests/test_coding_times.py
--- 2025.03.1-8/xarray/tests/test_coding_times.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_coding_times.py	2025-10-10 10:38:05.000000000 +0000
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 import warnings
 from datetime import datetime, timedelta
-from itertools import product
+from itertools import product, starmap
 from typing import Literal
 
 import numpy as np
@@ -238,8 +238,6 @@ def test_decode_non_standard_calendar_in
 def test_decode_dates_outside_timestamp_range(
     calendar, time_unit: PDDatetimeUnitOptions
 ) -> None:
-    from datetime import datetime
-
     import cftime
 
     units = "days since 0001-01-01"
@@ -378,8 +376,6 @@ def test_decode_nonstandard_calendar_mul
 def test_decode_multidim_time_outside_timestamp_range(
     calendar, time_unit: PDDatetimeUnitOptions
 ) -> None:
-    from datetime import datetime
-
     import cftime
 
     units = "days since 0001-01-01"
@@ -579,7 +575,7 @@ _CFTIME_DATETIME_UNITS_TESTS = [
 @pytest.mark.parametrize(("date_args", "expected"), _CFTIME_DATETIME_UNITS_TESTS)
 def test_infer_cftime_datetime_units(calendar, date_args, expected) -> None:
     date_type = _all_cftime_date_types()[calendar]
-    dates = [date_type(*args) for args in date_args]
+    dates = list(starmap(date_type, date_args))
     assert expected == infer_datetime_units(dates)
 
 
@@ -808,7 +804,7 @@ def calendar(request):
     return request.param
 
 
-@pytest.fixture()
+@pytest.fixture
 def times(calendar):
     import cftime
 
@@ -820,7 +816,7 @@ def times(calendar):
     )
 
 
-@pytest.fixture()
+@pytest.fixture
 def data(times):
     data = np.random.rand(2, 2, 4)
     lons = np.linspace(0, 11, 2)
@@ -830,7 +826,7 @@ def data(times):
     )
 
 
-@pytest.fixture()
+@pytest.fixture
 def times_3d(times):
     lons = np.linspace(0, 11, 2)
     lats = np.linspace(0, 20, 2)
@@ -1162,27 +1158,26 @@ def test__encode_datetime_with_cftime()
 
 
 @requires_cftime
-def test_encode_decode_cf_datetime_outofbounds_warnings(
+def test_round_trip_standard_calendar_cftime_datetimes_pre_reform() -> None:
+    from cftime import DatetimeGregorian
+
+    dates = np.array([DatetimeGregorian(1, 1, 1), DatetimeGregorian(2000, 1, 1)])
+    encoded = encode_cf_datetime(dates, "seconds since 2000-01-01", "standard")
+    with pytest.warns(SerializationWarning, match="Unable to decode time axis"):
+        decoded = decode_cf_datetime(*encoded)
+    np.testing.assert_equal(decoded, dates)
+
+
+@pytest.mark.parametrize("calendar", ["standard", "gregorian"])
+def test_encode_cf_datetime_gregorian_proleptic_gregorian_mismatch_error(
+    calendar: str,
     time_unit: PDDatetimeUnitOptions,
 ) -> None:
-    import cftime
-
     if time_unit == "ns":
-        pytest.skip("does not work work out of bounds datetimes")
+        pytest.skip("datetime64[ns] values can only be defined post reform")
     dates = np.array(["0001-01-01", "2001-01-01"], dtype=f"datetime64[{time_unit}]")
-    cfdates = np.array(
-        [
-            cftime.datetime(t0.year, t0.month, t0.day, calendar="gregorian")
-            for t0 in dates.astype(datetime)
-        ]
-    )
-    with pytest.warns(
-        SerializationWarning, match="Unable to encode numpy.datetime64 objects"
-    ):
-        encoded = encode_cf_datetime(dates, "seconds since 2000-01-01", "standard")
-    with pytest.warns(SerializationWarning, match="Unable to decode time axis"):
-        decoded = decode_cf_datetime(*encoded)
-    np.testing.assert_equal(decoded, cfdates)
+    with pytest.raises(ValueError, match="proleptic_gregorian"):
+        encode_cf_datetime(dates, "seconds since 2000-01-01", calendar)
 
 
 @pytest.mark.parametrize("calendar", ["gregorian", "Gregorian", "GREGORIAN"])
@@ -1215,7 +1210,7 @@ def test_should_cftime_be_used_source_ou
         "1000-01-01", periods=100, freq="MS", calendar="noleap", use_cftime=True
     )
     with pytest.raises(
-        ValueError, match="Source time range is not valid for numpy datetimes."
+        ValueError, match=r"Source time range is not valid for numpy datetimes."
     ):
         _should_cftime_be_used(src, "standard", False)
 
@@ -1226,7 +1221,7 @@ def test_should_cftime_be_used_target_no
         "2000-01-01", periods=100, freq="MS", calendar="noleap", use_cftime=True
     )
     with pytest.raises(
-        ValueError, match="Calendar 'noleap' is only valid with cftime."
+        ValueError, match=r"Calendar 'noleap' is only valid with cftime."
     ):
         _should_cftime_be_used(src, "noleap", False)
 
@@ -1388,7 +1383,7 @@ def test_contains_cftime_lazy() -> None:
 def test_roundtrip_datetime64_nanosecond_precision(
     timestr: str,
     format: Literal["ns", "us"],
-    dtype: np.typing.DTypeLike,
+    dtype: np.typing.DTypeLike | None,
     fill_value: int | float | None,
     use_encoding: bool,
     time_unit: PDDatetimeUnitOptions,
@@ -1504,7 +1499,7 @@ def test_roundtrip_datetime64_nanosecond
     [(np.int64, 20), (np.int64, np.iinfo(np.int64).min), (np.float64, 1e30)],
 )
 def test_roundtrip_timedelta64_nanosecond_precision(
-    dtype: np.typing.DTypeLike,
+    dtype: np.typing.DTypeLike | None,
     fill_value: int | float,
     time_unit: PDDatetimeUnitOptions,
 ) -> None:
@@ -1515,7 +1510,7 @@ def test_roundtrip_timedelta64_nanosecon
     timedelta_values[2] = nat
     timedelta_values[4] = nat
 
-    encoding = dict(dtype=dtype, _FillValue=fill_value)
+    encoding = dict(dtype=dtype, _FillValue=fill_value, units="nanoseconds")
     var = Variable(["time"], timedelta_values, encoding=encoding)
 
     encoded_var = conventions.encode_cf_variable(var)
@@ -1768,7 +1763,7 @@ def test_encode_cf_timedelta_via_dask(
 ) -> None:
     import dask.array
 
-    times_pd = pd.timedelta_range(start="0D", freq="D", periods=3, unit=time_unit)  # type: ignore[call-arg]
+    times_pd = pd.timedelta_range(start="0D", freq="D", periods=3, unit=time_unit)  # type: ignore[call-arg,unused-ignore]
     times = dask.array.from_array(times_pd, chunks=1)
     encoded_times, encoding_units = encode_cf_timedelta(times, units, dtype)
 
@@ -1828,8 +1823,9 @@ def test_encode_cf_timedelta_small_dtype
     assert_equal(variable, decoded)
 
 
-_DECODE_TIMEDELTA_TESTS = {
+_DECODE_TIMEDELTA_VIA_UNITS_TESTS = {
     "default": (True, None, np.dtype("timedelta64[ns]"), True),
+    "decode_timedelta=True": (True, True, np.dtype("timedelta64[ns]"), False),
     "decode_timedelta=False": (True, False, np.dtype("int64"), False),
     "inherit-time_unit-from-decode_times": (
         CFDatetimeCoder(time_unit="s"),
@@ -1860,17 +1856,21 @@ _DECODE_TIMEDELTA_TESTS = {
 
 @pytest.mark.parametrize(
     ("decode_times", "decode_timedelta", "expected_dtype", "warns"),
-    list(_DECODE_TIMEDELTA_TESTS.values()),
-    ids=list(_DECODE_TIMEDELTA_TESTS.keys()),
+    list(_DECODE_TIMEDELTA_VIA_UNITS_TESTS.values()),
+    ids=list(_DECODE_TIMEDELTA_VIA_UNITS_TESTS.keys()),
 )
-def test_decode_timedelta(
+def test_decode_timedelta_via_units(
     decode_times, decode_timedelta, expected_dtype, warns
 ) -> None:
     timedeltas = pd.timedelta_range(0, freq="D", periods=3)
-    var = Variable(["time"], timedeltas)
-    encoded = conventions.encode_cf_variable(var)
+    attrs = {"units": "days"}
+    var = Variable(["time"], timedeltas, encoding=attrs)
+    encoded = Variable(["time"], np.array([0, 1, 2]), attrs=attrs)
     if warns:
-        with pytest.warns(FutureWarning, match="decode_timedelta"):
+        with pytest.warns(
+            FutureWarning,
+            match="xarray will not decode the variable 'foo' into a timedelta64 dtype",
+        ):
             decoded = conventions.decode_cf_variable(
                 "foo",
                 encoded,
@@ -1888,6 +1888,68 @@ def test_decode_timedelta(
     assert decoded.dtype == expected_dtype
 
 
+_DECODE_TIMEDELTA_VIA_DTYPE_TESTS = {
+    "default": (True, None, "ns", np.dtype("timedelta64[ns]")),
+    "decode_timedelta=False": (True, False, "ns", np.dtype("int64")),
+    "decode_timedelta=True": (True, True, "ns", np.dtype("timedelta64[ns]")),
+    "use-original-units": (True, True, "s", np.dtype("timedelta64[s]")),
+    "inherit-time_unit-from-decode_times": (
+        CFDatetimeCoder(time_unit="s"),
+        None,
+        "ns",
+        np.dtype("timedelta64[s]"),
+    ),
+    "set-time_unit-via-CFTimedeltaCoder-decode_times=True": (
+        True,
+        CFTimedeltaCoder(time_unit="s"),
+        "ns",
+        np.dtype("timedelta64[s]"),
+    ),
+    "set-time_unit-via-CFTimedeltaCoder-decode_times=False": (
+        False,
+        CFTimedeltaCoder(time_unit="s"),
+        "ns",
+        np.dtype("timedelta64[s]"),
+    ),
+    "override-time_unit-from-decode_times": (
+        CFDatetimeCoder(time_unit="ns"),
+        CFTimedeltaCoder(time_unit="s"),
+        "ns",
+        np.dtype("timedelta64[s]"),
+    ),
+    "decode-different-units": (
+        True,
+        CFTimedeltaCoder(time_unit="us"),
+        "s",
+        np.dtype("timedelta64[us]"),
+    ),
+}
+
+
+@pytest.mark.parametrize(
+    ("decode_times", "decode_timedelta", "original_unit", "expected_dtype"),
+    list(_DECODE_TIMEDELTA_VIA_DTYPE_TESTS.values()),
+    ids=list(_DECODE_TIMEDELTA_VIA_DTYPE_TESTS.keys()),
+)
+def test_decode_timedelta_via_dtype(
+    decode_times, decode_timedelta, original_unit, expected_dtype
+) -> None:
+    timedeltas = pd.timedelta_range(0, freq="D", periods=3, unit=original_unit)  # type: ignore[call-arg,unused-ignore]
+    encoding = {"units": "days"}
+    var = Variable(["time"], timedeltas, encoding=encoding)
+    encoded = conventions.encode_cf_variable(var)
+    assert encoded.attrs["dtype"] == f"timedelta64[{original_unit}]"
+    assert encoded.attrs["units"] == encoding["units"]
+    decoded = conventions.decode_cf_variable(
+        "foo", encoded, decode_times=decode_times, decode_timedelta=decode_timedelta
+    )
+    if decode_timedelta is False:
+        assert_equal(encoded, decoded)
+    else:
+        assert_equal(var, decoded)
+    assert decoded.dtype == expected_dtype
+
+
 def test_lazy_decode_timedelta_unexpected_dtype() -> None:
     attrs = {"units": "seconds"}
     encoded = Variable(["time"], [0, 0.5, 1], attrs=attrs)
@@ -1943,7 +2005,12 @@ def test_duck_array_decode_times(calenda
 def test_decode_timedelta_mask_and_scale(
     decode_timedelta: bool, mask_and_scale: bool
 ) -> None:
-    attrs = {"units": "nanoseconds", "_FillValue": np.int16(-1), "add_offset": 100000.0}
+    attrs = {
+        "dtype": "timedelta64[ns]",
+        "units": "nanoseconds",
+        "_FillValue": np.int16(-1),
+        "add_offset": 100000.0,
+    }
     encoded = Variable(["time"], np.array([0, -1, 1], "int16"), attrs=attrs)
     decoded = conventions.decode_cf_variable(
         "foo", encoded, mask_and_scale=mask_and_scale, decode_timedelta=decode_timedelta
@@ -1959,3 +2026,196 @@ def test_decode_floating_point_timedelta
     decoded = conventions.decode_cf_variable("foo", encoded, decode_timedelta=True)
     with assert_no_warnings():
         decoded.load()
+
+
+def test_timedelta64_coding_via_dtype(time_unit: PDDatetimeUnitOptions) -> None:
+    timedeltas = np.array([0, 1, "NaT"], dtype=f"timedelta64[{time_unit}]")
+    variable = Variable(["time"], timedeltas)
+    expected_units = _numpy_to_netcdf_timeunit(time_unit)
+
+    encoded = conventions.encode_cf_variable(variable)
+    assert encoded.attrs["dtype"] == f"timedelta64[{time_unit}]"
+    assert encoded.attrs["units"] == expected_units
+
+    decoded = conventions.decode_cf_variable("timedeltas", encoded)
+    assert decoded.encoding["dtype"] == np.dtype("int64")
+    assert decoded.encoding["units"] == expected_units
+
+    assert_identical(decoded, variable)
+    assert decoded.dtype == variable.dtype
+
+    reencoded = conventions.encode_cf_variable(decoded)
+    assert_identical(reencoded, encoded)
+    assert reencoded.dtype == encoded.dtype
+
+
+def test_timedelta_coding_via_dtype_non_pandas_coarse_resolution_warning() -> None:
+    attrs = {"dtype": "timedelta64[D]", "units": "days"}
+    encoded = Variable(["time"], [0, 1, 2], attrs=attrs)
+    with pytest.warns(UserWarning, match="xarray only supports"):
+        decoded = conventions.decode_cf_variable("timedeltas", encoded)
+    expected_array = np.array([0, 1, 2], dtype="timedelta64[D]")
+    expected_array = expected_array.astype("timedelta64[s]")
+    expected = Variable(["time"], expected_array)
+    assert_identical(decoded, expected)
+    assert decoded.dtype == np.dtype("timedelta64[s]")
+
+
+@pytest.mark.xfail(reason="xarray does not recognize picoseconds as time-like")
+def test_timedelta_coding_via_dtype_non_pandas_fine_resolution_warning() -> None:
+    attrs = {"dtype": "timedelta64[ps]", "units": "picoseconds"}
+    encoded = Variable(["time"], [0, 1000, 2000], attrs=attrs)
+    with pytest.warns(UserWarning, match="xarray only supports"):
+        decoded = conventions.decode_cf_variable("timedeltas", encoded)
+    expected_array = np.array([0, 1000, 2000], dtype="timedelta64[ps]")
+    expected_array = expected_array.astype("timedelta64[ns]")
+    expected = Variable(["time"], expected_array)
+    assert_identical(decoded, expected)
+    assert decoded.dtype == np.dtype("timedelta64[ns]")
+
+
+def test_timedelta_decode_via_dtype_invalid_encoding() -> None:
+    attrs = {"dtype": "timedelta64[s]", "units": "seconds"}
+    encoding = {"units": "foo"}
+    encoded = Variable(["time"], [0, 1, 2], attrs=attrs, encoding=encoding)
+    with pytest.raises(ValueError, match="failed to prevent"):
+        conventions.decode_cf_variable("timedeltas", encoded)
+
+
+@pytest.mark.parametrize("attribute", ["dtype", "units"])
+def test_timedelta_encode_via_dtype_invalid_attribute(attribute) -> None:
+    timedeltas = pd.timedelta_range(0, freq="D", periods=3)
+    attrs = {attribute: "foo"}
+    variable = Variable(["time"], timedeltas, attrs=attrs)
+    with pytest.raises(ValueError, match="failed to prevent"):
+        conventions.encode_cf_variable(variable)
+
+
+@pytest.mark.parametrize(
+    ("decode_via_units", "decode_via_dtype", "attrs", "expect_timedelta64"),
+    [
+        (True, True, {"units": "seconds"}, True),
+        (True, False, {"units": "seconds"}, True),
+        (False, True, {"units": "seconds"}, False),
+        (False, False, {"units": "seconds"}, False),
+        (True, True, {"dtype": "timedelta64[s]", "units": "seconds"}, True),
+        (True, False, {"dtype": "timedelta64[s]", "units": "seconds"}, True),
+        (False, True, {"dtype": "timedelta64[s]", "units": "seconds"}, True),
+        (False, False, {"dtype": "timedelta64[s]", "units": "seconds"}, False),
+    ],
+    ids=lambda x: f"{x!r}",
+)
+def test_timedelta_decoding_options(
+    decode_via_units, decode_via_dtype, attrs, expect_timedelta64
+) -> None:
+    array = np.array([0, 1, 2], dtype=np.dtype("int64"))
+    encoded = Variable(["time"], array, attrs=attrs)
+
+    # Confirm we decode to the expected dtype.
+    decode_timedelta = CFTimedeltaCoder(
+        time_unit="s",
+        decode_via_units=decode_via_units,
+        decode_via_dtype=decode_via_dtype,
+    )
+    decoded = conventions.decode_cf_variable(
+        "foo", encoded, decode_timedelta=decode_timedelta
+    )
+    if expect_timedelta64:
+        assert decoded.dtype == np.dtype("timedelta64[s]")
+    else:
+        assert decoded.dtype == np.dtype("int64")
+
+    # Confirm we exactly roundtrip.
+    reencoded = conventions.encode_cf_variable(decoded)
+
+    expected = encoded.copy()
+    if "dtype" not in attrs and decode_via_units:
+        expected.attrs["dtype"] = "timedelta64[s]"
+    assert_identical(reencoded, expected)
+
+
+def test_timedelta_encoding_explicit_non_timedelta64_dtype() -> None:
+    encoding = {"dtype": np.dtype("int32")}
+    timedeltas = pd.timedelta_range(0, freq="D", periods=3)
+    variable = Variable(["time"], timedeltas, encoding=encoding)
+
+    encoded = conventions.encode_cf_variable(variable)
+    assert encoded.attrs["units"] == "days"
+    assert encoded.attrs["dtype"] == "timedelta64[ns]"
+    assert encoded.dtype == np.dtype("int32")
+
+    decoded = conventions.decode_cf_variable("foo", encoded)
+    assert_identical(decoded, variable)
+
+    reencoded = conventions.encode_cf_variable(decoded)
+    assert_identical(reencoded, encoded)
+    assert encoded.attrs["units"] == "days"
+    assert encoded.attrs["dtype"] == "timedelta64[ns]"
+    assert encoded.dtype == np.dtype("int32")
+
+
+@pytest.mark.parametrize("mask_attribute", ["_FillValue", "missing_value"])
+def test_timedelta64_coding_via_dtype_with_mask(
+    time_unit: PDDatetimeUnitOptions, mask_attribute: str
+) -> None:
+    timedeltas = np.array([0, 1, "NaT"], dtype=f"timedelta64[{time_unit}]")
+    mask = 10
+    variable = Variable(["time"], timedeltas, encoding={mask_attribute: mask})
+    expected_dtype = f"timedelta64[{time_unit}]"
+    expected_units = _numpy_to_netcdf_timeunit(time_unit)
+
+    encoded = conventions.encode_cf_variable(variable)
+    assert encoded.attrs["dtype"] == expected_dtype
+    assert encoded.attrs["units"] == expected_units
+    assert encoded.attrs[mask_attribute] == mask
+    assert encoded[-1] == mask
+
+    decoded = conventions.decode_cf_variable("timedeltas", encoded)
+    assert decoded.encoding["dtype"] == np.dtype("int64")
+    assert decoded.encoding["units"] == expected_units
+    assert decoded.encoding[mask_attribute] == mask
+    assert np.isnat(decoded[-1])
+
+    assert_identical(decoded, variable)
+    assert decoded.dtype == variable.dtype
+
+    reencoded = conventions.encode_cf_variable(decoded)
+    assert_identical(reencoded, encoded)
+    assert reencoded.dtype == encoded.dtype
+
+
+def test_roundtrip_0size_timedelta(time_unit: PDDatetimeUnitOptions) -> None:
+    # regression test for GitHub issue #10310
+    encoding = {"units": "days", "dtype": np.dtype("int64")}
+    data = np.array([], dtype=f"=m8[{time_unit}]")
+    decoded = Variable(["time"], data, encoding=encoding)
+    encoded = conventions.encode_cf_variable(decoded, name="foo")
+    assert encoded.dtype == encoding["dtype"]
+    assert encoded.attrs["units"] == encoding["units"]
+    decoded = conventions.decode_cf_variable("foo", encoded, decode_timedelta=True)
+    assert decoded.dtype == np.dtype(f"=m8[{time_unit}]")
+    with assert_no_warnings():
+        decoded.load()
+    assert decoded.dtype == np.dtype("=m8[s]")
+    assert decoded.encoding == encoding
+
+
+def test_roundtrip_empty_datetime64_array(time_unit: PDDatetimeUnitOptions) -> None:
+    # Regression test for GitHub issue #10722.
+    encoding = {
+        "units": "days since 1990-1-1",
+        "dtype": np.dtype("float64"),
+        "calendar": "standard",
+    }
+    times = date_range("2000", periods=0, unit=time_unit)
+    variable = Variable(["time"], times, encoding=encoding)
+
+    encoded = conventions.encode_cf_variable(variable, name="foo")
+    assert encoded.dtype == np.dtype("float64")
+
+    decode_times = CFDatetimeCoder(time_unit=time_unit)
+    roundtripped = conventions.decode_cf_variable(
+        "foo", encoded, decode_times=decode_times
+    )
+    assert_identical(variable, roundtripped)
+    assert roundtripped.dtype == variable.dtype
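
Taken together, the dtype-based timedelta tests above describe a simple round trip; a condensed sketch, assuming this release's encoding behaviour (variable names are illustrative):

    import numpy as np
    import xarray.conventions as conventions
    from xarray import Variable

    variable = Variable(["time"], np.array([0, 1, "NaT"], dtype="timedelta64[s]"))
    encoded = conventions.encode_cf_variable(variable)
    # The on-disk attributes record both the CF units and the original numpy dtype.
    assert encoded.attrs["units"] == "seconds"
    assert encoded.attrs["dtype"] == "timedelta64[s]"

    decoded = conventions.decode_cf_variable("timedeltas", encoded)
    assert decoded.dtype == np.dtype("timedelta64[s]")
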
diff -pruN 2025.03.1-8/xarray/tests/test_combine.py 2025.10.1-1/xarray/tests/test_combine.py
--- 2025.03.1-8/xarray/tests/test_combine.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_combine.py	2025-10-10 10:38:05.000000000 +0000
@@ -13,6 +13,7 @@ from xarray import (
     combine_nested,
     concat,
     merge,
+    set_options,
 )
 from xarray.core import dtypes
 from xarray.structure.combine import (
@@ -27,7 +28,7 @@ from xarray.tests import assert_equal, a
 from xarray.tests.test_dataset import create_test_data
 
 
-def assert_combined_tile_ids_equal(dict1, dict2):
+def assert_combined_tile_ids_equal(dict1: dict, dict2: dict) -> None:
     assert len(dict1) == len(dict2)
     for k in dict1.keys():
         assert k in dict2.keys()
@@ -40,7 +41,9 @@ class TestTileIDsFromNestedList:
         input = [ds(0), ds(1)]
 
         expected = {(0,): ds(0), (1,): ds(1)}
-        actual = _infer_concat_order_from_positions(input)
+        actual: dict[tuple[int, ...], Dataset] = _infer_concat_order_from_positions(
+            input
+        )
         assert_combined_tile_ids_equal(expected, actual)
 
     def test_2d(self):
@@ -55,7 +58,9 @@ class TestTileIDsFromNestedList:
             (2, 0): ds(4),
             (2, 1): ds(5),
         }
-        actual = _infer_concat_order_from_positions(input)
+        actual: dict[tuple[int, ...], Dataset] = _infer_concat_order_from_positions(
+            input
+        )
         assert_combined_tile_ids_equal(expected, actual)
 
     def test_3d(self):
@@ -79,7 +84,9 @@ class TestTileIDsFromNestedList:
             (1, 2, 0): ds(10),
             (1, 2, 1): ds(11),
         }
-        actual = _infer_concat_order_from_positions(input)
+        actual: dict[tuple[int, ...], Dataset] = _infer_concat_order_from_positions(
+            input
+        )
         assert_combined_tile_ids_equal(expected, actual)
 
     def test_single_dataset(self):
@@ -87,7 +94,9 @@ class TestTileIDsFromNestedList:
         input = [ds]
 
         expected = {(0,): ds}
-        actual = _infer_concat_order_from_positions(input)
+        actual: dict[tuple[int, ...], Dataset] = _infer_concat_order_from_positions(
+            input
+        )
         assert_combined_tile_ids_equal(expected, actual)
 
     def test_redundant_nesting(self):
@@ -95,24 +104,30 @@ class TestTileIDsFromNestedList:
         input = [[ds(0)], [ds(1)]]
 
         expected = {(0, 0): ds(0), (1, 0): ds(1)}
-        actual = _infer_concat_order_from_positions(input)
+        actual: dict[tuple[int, ...], Dataset] = _infer_concat_order_from_positions(
+            input
+        )
         assert_combined_tile_ids_equal(expected, actual)
 
     def test_ignore_empty_list(self):
         ds = create_test_data(0)
-        input = [ds, []]
+        input: list = [ds, []]
         expected = {(0,): ds}
-        actual = _infer_concat_order_from_positions(input)
+        actual: dict[tuple[int, ...], Dataset] = _infer_concat_order_from_positions(
+            input
+        )
         assert_combined_tile_ids_equal(expected, actual)
 
     def test_uneven_depth_input(self):
         # Auto_combine won't work on ragged input
         # but this is just to increase test coverage
         ds = create_test_data
-        input = [ds(0), [ds(1), ds(2)]]
+        input: list = [ds(0), [ds(1), ds(2)]]
 
         expected = {(0,): ds(0), (1, 0): ds(1), (1, 1): ds(2)}
-        actual = _infer_concat_order_from_positions(input)
+        actual: dict[tuple[int, ...], Dataset] = _infer_concat_order_from_positions(
+            input
+        )
         assert_combined_tile_ids_equal(expected, actual)
 
     def test_uneven_length_input(self):
@@ -122,7 +137,9 @@ class TestTileIDsFromNestedList:
         input = [[ds(0)], [ds(1), ds(2)]]
 
         expected = {(0, 0): ds(0), (1, 0): ds(1), (1, 1): ds(2)}
-        actual = _infer_concat_order_from_positions(input)
+        actual: dict[tuple[int, ...], Dataset] = _infer_concat_order_from_positions(
+            input
+        )
         assert_combined_tile_ids_equal(expected, actual)
 
     def test_infer_from_datasets(self):
@@ -130,7 +147,9 @@ class TestTileIDsFromNestedList:
         input = [ds(0), ds(1)]
 
         expected = {(0,): ds(0), (1,): ds(1)}
-        actual = _infer_concat_order_from_positions(input)
+        actual: dict[tuple[int, ...], Dataset] = _infer_concat_order_from_positions(
+            input
+        )
         assert_combined_tile_ids_equal(expected, actual)
 
 
@@ -279,8 +298,10 @@ class TestNewTileIDs:
 
 
 class TestCombineND:
-    @pytest.mark.parametrize("concat_dim", ["dim1", "new_dim"])
-    def test_concat_once(self, create_combined_ids, concat_dim):
+    @pytest.mark.parametrize(
+        "concat_dim, kwargs", [("dim1", {}), ("new_dim", {"data_vars": "all"})]
+    )
+    def test_concat_once(self, create_combined_ids, concat_dim, kwargs):
         shape = (2,)
         combined_ids = create_combined_ids(shape)
         ds = create_test_data
@@ -290,9 +311,12 @@ class TestCombineND:
             data_vars="all",
             coords="different",
             compat="no_conflicts",
+            fill_value=dtypes.NA,
+            join="outer",
+            combine_attrs="drop",
         )
 
-        expected_ds = concat([ds(0), ds(1)], dim=concat_dim)
+        expected_ds = concat([ds(0), ds(1)], dim=concat_dim, **kwargs)
         assert_combined_tile_ids_equal(result, {(): expected_ds})
 
     def test_concat_only_first_dim(self, create_combined_ids):
@@ -304,6 +328,9 @@ class TestCombineND:
             data_vars="all",
             coords="different",
             compat="no_conflicts",
+            fill_value=dtypes.NA,
+            join="outer",
+            combine_attrs="drop",
         )
 
         ds = create_test_data
@@ -315,17 +342,28 @@ class TestCombineND:
 
         assert_combined_tile_ids_equal(result, expected)
 
-    @pytest.mark.parametrize("concat_dim", ["dim1", "new_dim"])
-    def test_concat_twice(self, create_combined_ids, concat_dim):
+    @pytest.mark.parametrize(
+        "concat_dim, kwargs", [("dim1", {}), ("new_dim", {"data_vars": "all"})]
+    )
+    def test_concat_twice(self, create_combined_ids, concat_dim, kwargs):
         shape = (2, 3)
         combined_ids = create_combined_ids(shape)
-        result = _combine_nd(combined_ids, concat_dims=["dim1", concat_dim])
+        result = _combine_nd(
+            combined_ids,
+            concat_dims=["dim1", concat_dim],
+            data_vars="all",
+            coords="different",
+            compat="no_conflicts",
+            fill_value=dtypes.NA,
+            join="outer",
+            combine_attrs="drop",
+        )
 
         ds = create_test_data
         partway1 = concat([ds(0), ds(3)], dim="dim1")
         partway2 = concat([ds(1), ds(4)], dim="dim1")
         partway3 = concat([ds(2), ds(5)], dim="dim1")
-        expected = concat([partway1, partway2, partway3], dim=concat_dim)
+        expected = concat([partway1, partway2, partway3], **kwargs, dim=concat_dim)
 
         assert_equal(result, expected)
 
@@ -417,7 +455,7 @@ class TestNestedCombine:
             Dataset({"a": ("x", [20]), "x": [0]}),
         ]
         expected = Dataset({"a": (("t", "x"), [[10], [20]]), "x": [0]})
-        actual = combine_nested(objs, concat_dim="t")
+        actual = combine_nested(objs, data_vars="all", concat_dim="t")
         assert_identical(expected, actual)
 
         # Same but with a DataArray as new dim, see GH #1988 and #2647
@@ -425,42 +463,60 @@ class TestNestedCombine:
         expected = Dataset(
             {"a": (("baz", "x"), [[10], [20]]), "x": [0], "baz": [100, 150]}
         )
-        actual = combine_nested(objs, concat_dim=dim)
+        actual = combine_nested(objs, data_vars="all", concat_dim=dim)
         assert_identical(expected, actual)
 
-    def test_nested_merge(self):
+    def test_nested_merge_with_self(self):
         data = Dataset({"x": 0})
         actual = combine_nested([data, data, data], concat_dim=None)
         assert_identical(data, actual)
 
+    def test_nested_merge_with_overlapping_values(self):
         ds1 = Dataset({"a": ("x", [1, 2]), "x": [0, 1]})
         ds2 = Dataset({"a": ("x", [2, 3]), "x": [1, 2]})
         expected = Dataset({"a": ("x", [1, 2, 3]), "x": [0, 1, 2]})
-        actual = combine_nested([ds1, ds2], concat_dim=None)
+        with pytest.warns(
+            FutureWarning,
+            match="will change from compat='no_conflicts' to compat='override'",
+        ):
+            actual = combine_nested([ds1, ds2], join="outer", concat_dim=None)
         assert_identical(expected, actual)
-        actual = combine_nested([ds1, ds2], concat_dim=[None])
+        actual = combine_nested(
+            [ds1, ds2], join="outer", compat="no_conflicts", concat_dim=None
+        )
+        assert_identical(expected, actual)
+        actual = combine_nested(
+            [ds1, ds2], join="outer", compat="no_conflicts", concat_dim=[None]
+        )
         assert_identical(expected, actual)
 
+    def test_nested_merge_with_nan_no_conflicts(self):
         tmp1 = Dataset({"x": 0})
         tmp2 = Dataset({"x": np.nan})
-        actual = combine_nested([tmp1, tmp2], concat_dim=None)
+        actual = combine_nested([tmp1, tmp2], compat="no_conflicts", concat_dim=None)
         assert_identical(tmp1, actual)
-        actual = combine_nested([tmp1, tmp2], concat_dim=[None])
+        with pytest.warns(
+            FutureWarning,
+            match="will change from compat='no_conflicts' to compat='override'",
+        ):
+            combine_nested([tmp1, tmp2], concat_dim=None)
+        actual = combine_nested([tmp1, tmp2], compat="no_conflicts", concat_dim=[None])
         assert_identical(tmp1, actual)
 
-        # Single object, with a concat_dim explicitly provided
+    def test_nested_merge_with_concat_dim_explicitly_provided(self):
         # Test the issue reported in GH #1988
         objs = [Dataset({"x": 0, "y": 1})]
         dim = DataArray([100], name="baz", dims="baz")
-        actual = combine_nested(objs, concat_dim=[dim])
+        actual = combine_nested(objs, concat_dim=[dim], data_vars="all")
         expected = Dataset({"x": ("baz", [0]), "y": ("baz", [1])}, {"baz": [100]})
         assert_identical(expected, actual)
 
+    def test_nested_merge_with_non_scalars(self):
         # Just making sure that auto_combine is doing what is
         # expected for non-scalar values, too.
         objs = [Dataset({"x": ("z", [0, 1]), "y": ("z", [1, 2])})]
         dim = DataArray([100], name="baz", dims="baz")
-        actual = combine_nested(objs, concat_dim=[dim])
+        actual = combine_nested(objs, concat_dim=[dim], data_vars="all")
         expected = Dataset(
             {"x": (("baz", "z"), [[0, 1]]), "y": (("baz", "z"), [[1, 2]])},
             {"baz": [100]},
@@ -510,10 +566,14 @@ class TestNestedCombine:
         partway1 = concat([ds(0), ds(3)], dim="dim1")
         partway2 = concat([ds(1), ds(4)], dim="dim1")
         partway3 = concat([ds(2), ds(5)], dim="dim1")
-        expected = concat([partway1, partway2, partway3], dim="dim2")
+        expected = concat([partway1, partway2, partway3], data_vars="all", dim="dim2")
 
         datasets = [[ds(0), ds(1), ds(2)], [ds(3), ds(4), ds(5)]]
-        result = combine_nested(datasets, concat_dim=["dim1", "dim2"])
+        result = combine_nested(
+            datasets,
+            data_vars="all",
+            concat_dim=["dim1", "dim2"],
+        )
         assert_equal(result, expected)
 
     def test_auto_combine_2d_combine_attrs_kwarg(self):
@@ -522,7 +582,7 @@ class TestNestedCombine:
         partway1 = concat([ds(0), ds(3)], dim="dim1")
         partway2 = concat([ds(1), ds(4)], dim="dim1")
         partway3 = concat([ds(2), ds(5)], dim="dim1")
-        expected = concat([partway1, partway2, partway3], dim="dim2")
+        expected = concat([partway1, partway2, partway3], data_vars="all", dim="dim2")
 
         expected_dict = {}
         expected_dict["drop"] = expected.copy(deep=True)
@@ -539,8 +599,8 @@ class TestNestedCombine:
         expected_dict["override"] = expected.copy(deep=True)
         expected_dict["override"].attrs = {"a": 1}
         f = lambda attrs, context: attrs[0]
-        expected_dict[f] = expected.copy(deep=True)
-        expected_dict[f].attrs = f([{"a": 1}], None)
+        expected_dict[f] = expected.copy(deep=True)  # type: ignore[index]
+        expected_dict[f].attrs = f([{"a": 1}], None)  # type: ignore[index]
 
         datasets = [[ds(0), ds(1), ds(2)], [ds(3), ds(4), ds(5)]]
 
@@ -553,14 +613,20 @@ class TestNestedCombine:
 
         with pytest.raises(ValueError, match=r"combine_attrs='identical'"):
             result = combine_nested(
-                datasets, concat_dim=["dim1", "dim2"], combine_attrs="identical"
+                datasets,
+                concat_dim=["dim1", "dim2"],
+                data_vars="all",
+                combine_attrs="identical",
             )
 
-        for combine_attrs in expected_dict:
+        for combine_attrs, expected in expected_dict.items():
             result = combine_nested(
-                datasets, concat_dim=["dim1", "dim2"], combine_attrs=combine_attrs
+                datasets,
+                concat_dim=["dim1", "dim2"],
+                data_vars="all",
+                combine_attrs=combine_attrs,  # type: ignore[arg-type]
             )
-            assert_identical(result, expected_dict[combine_attrs])
+            assert_identical(result, expected)
 
     def test_combine_nested_missing_data_new_dim(self):
         # Your data includes "time" and "station" dimensions, and each year's
@@ -572,7 +638,7 @@ class TestNestedCombine:
         expected = Dataset(
             {"a": (("t", "x"), [[np.nan, 2, 3], [1, 2, np.nan]])}, {"x": [0, 1, 2]}
         )
-        actual = combine_nested(datasets, concat_dim="t")
+        actual = combine_nested(datasets, data_vars="all", join="outer", concat_dim="t")
         assert_identical(expected, actual)
 
     def test_invalid_hypercube_input(self):
@@ -584,11 +650,11 @@ class TestNestedCombine:
         ):
             combine_nested(datasets, concat_dim=["dim1", "dim2"])
 
-        datasets = [[ds(0), ds(1)], [[ds(3), ds(4)]]]
+        datasets2: list = [[ds(0), ds(1)], [[ds(3), ds(4)]]]
         with pytest.raises(
             ValueError, match=r"sub-lists do not have consistent depths"
         ):
-            combine_nested(datasets, concat_dim=["dim1", "dim2"])
+            combine_nested(datasets2, concat_dim=["dim1", "dim2"])
 
         datasets = [[ds(0), ds(1)], [ds(3), ds(4)]]
         with pytest.raises(ValueError, match=r"concat_dims has length"):
@@ -650,7 +716,13 @@ class TestNestedCombine:
             },
             {"x": [0, 1, 2]},
         )
-        actual = combine_nested(datasets, concat_dim="t", fill_value=fill_value)
+        actual = combine_nested(
+            datasets,
+            concat_dim="t",
+            data_vars="all",
+            join="outer",
+            fill_value=fill_value,
+        )
         assert_identical(expected, actual)
 
     def test_combine_nested_unnamed_data_arrays(self):
@@ -710,26 +782,30 @@ class TestCombineDatasetsbyCoords:
         expected = Dataset({"x": [0, 1, 2]})
         assert_identical(expected, actual)
 
+    def test_combine_by_coords_handles_non_sorted_variables(self):
         # ensure auto_combine handles non-sorted variables
         objs = [
             Dataset({"x": ("a", [0]), "y": ("a", [0]), "a": [0]}),
             Dataset({"x": ("a", [1]), "y": ("a", [1]), "a": [1]}),
         ]
-        actual = combine_by_coords(objs)
+        actual = combine_by_coords(objs, join="outer")
         expected = Dataset({"x": ("a", [0, 1]), "y": ("a", [0, 1]), "a": [0, 1]})
         assert_identical(expected, actual)
 
+    def test_combine_by_coords_multiple_variables(self):
         objs = [Dataset({"x": [0], "y": [0]}), Dataset({"y": [1], "x": [1]})]
-        actual = combine_by_coords(objs)
+        actual = combine_by_coords(objs, join="outer")
         expected = Dataset({"x": [0, 1], "y": [0, 1]})
         assert_equal(actual, expected)
 
+    def test_combine_by_coords_for_scalar_variables(self):
         objs = [Dataset({"x": 0}), Dataset({"x": 1})]
         with pytest.raises(
             ValueError, match=r"Could not find any dimension coordinates"
         ):
             combine_by_coords(objs)
 
+    def test_combine_by_coords_requires_coord_or_index(self):
         objs = [Dataset({"x": [0], "y": [0]}), Dataset({"x": [0]})]
         with pytest.raises(
             ValueError,
@@ -959,9 +1035,13 @@ class TestCombineDatasetsbyCoords:
     def test_infer_order_from_coords(self):
         data = create_test_data()
         objs = [data.isel(dim2=slice(4, 9)), data.isel(dim2=slice(4))]
-        actual = combine_by_coords(objs)
+        actual = combine_by_coords(objs, data_vars="all")
         expected = data
-        assert expected.broadcast_equals(actual)
+        assert expected.broadcast_equals(actual)  # type: ignore[arg-type]
+
+        with set_options(use_new_combine_kwarg_defaults=True):
+            actual = combine_by_coords(objs)
+        assert_identical(actual, expected)
 
     def test_combine_leaving_bystander_dimensions(self):
         # Check non-monotonic bystander dimension coord doesn't raise
@@ -997,7 +1077,7 @@ class TestCombineDatasetsbyCoords:
             Dataset({"a": ("x", [1]), "x": [1]}),
         ]
         expected = Dataset({"a": ("x", [0, 1]), "b": ("x", [0, np.nan])}, {"x": [0, 1]})
-        actual = combine_by_coords(datasets)
+        actual = combine_by_coords(datasets, join="outer")
         assert_identical(expected, actual)
 
     def test_combine_by_coords_still_fails(self):
@@ -1005,7 +1085,7 @@ class TestCombineDatasetsbyCoords:
         # https://github.com/pydata/xarray/issues/508
         datasets = [Dataset({"x": 0}, {"y": 0}), Dataset({"x": 1}, {"y": 1, "z": 1})]
         with pytest.raises(ValueError):
-            combine_by_coords(datasets, "y")
+            combine_by_coords(datasets, "y")  # type: ignore[arg-type]
 
     def test_combine_by_coords_no_concat(self):
         objs = [Dataset({"x": 0}), Dataset({"y": 1})]
@@ -1014,7 +1094,7 @@ class TestCombineDatasetsbyCoords:
         assert_identical(expected, actual)
 
         objs = [Dataset({"x": 0, "y": 1}), Dataset({"y": np.nan, "z": 2})]
-        actual = combine_by_coords(objs)
+        actual = combine_by_coords(objs, compat="no_conflicts")
         expected = Dataset({"x": 0, "y": 1, "z": 2})
         assert_identical(expected, actual)
 
@@ -1032,7 +1112,7 @@ class TestCombineDatasetsbyCoords:
         x1 = Dataset({"a": (("y", "x"), [[1]])}, coords={"y": [0], "x": [0]})
         x2 = Dataset({"a": (("y", "x"), [[1]])}, coords={"y": [1], "x": [0]})
         x3 = Dataset({"a": (("y", "x"), [[1]])}, coords={"y": [0], "x": [1]})
-        actual = combine_by_coords([x1, x2, x3])
+        actual = combine_by_coords([x1, x2, x3], join="outer")
         expected = Dataset(
             {"a": (("y", "x"), [[1, 1], [1, np.nan]])},
             coords={"y": [0, 1], "x": [0, 1]},
@@ -1040,8 +1120,10 @@ class TestCombineDatasetsbyCoords:
         assert_identical(expected, actual)
 
         # test that this fails if fill_value is None
-        with pytest.raises(ValueError):
-            combine_by_coords([x1, x2, x3], fill_value=None)
+        with pytest.raises(
+            ValueError, match="supplied objects do not form a hypercube"
+        ):
+            combine_by_coords([x1, x2, x3], join="outer", fill_value=None)
 
     def test_combine_by_coords_override_order(self) -> None:
         # regression test for https://github.com/pydata/xarray/issues/8828
@@ -1111,7 +1193,7 @@ class TestCombineMixedObjectsbyCoords:
         named_da1 = DataArray(name="a", data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x")
         named_da2 = DataArray(name="b", data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x")
 
-        actual = combine_by_coords([named_da1, named_da2])
+        actual = combine_by_coords([named_da1, named_da2], join="outer")
         expected = Dataset(
             {
                 "a": DataArray(data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x"),
@@ -1124,11 +1206,132 @@ class TestCombineMixedObjectsbyCoords:
         named_da1 = DataArray(name="a", data=[1.0, 2.0], coords={"x": [0, 1]}, dims="x")
         named_da2 = DataArray(name="a", data=[3.0, 4.0], coords={"x": [2, 3]}, dims="x")
 
-        actual = combine_by_coords([named_da1, named_da2])
-        expected = merge([named_da1, named_da2])
+        actual = combine_by_coords([named_da1, named_da2], join="outer")
+        expected = merge([named_da1, named_da2], compat="no_conflicts", join="outer")
         assert_identical(expected, actual)
 
 
+class TestNewDefaults:
+    def test_concat_along_existing_dim(self):
+        concat_dim = "dim1"
+        ds = create_test_data
+        with set_options(use_new_combine_kwarg_defaults=False):
+            old = concat([ds(0), ds(1)], dim=concat_dim)
+        with set_options(use_new_combine_kwarg_defaults=True):
+            new = concat([ds(0), ds(1)], dim=concat_dim)
+        assert_identical(old, new)
+
+    def test_concat_along_new_dim(self):
+        concat_dim = "new_dim"
+        ds = create_test_data
+        with set_options(use_new_combine_kwarg_defaults=False):
+            old = concat([ds(0), ds(1)], dim=concat_dim)
+        with set_options(use_new_combine_kwarg_defaults=True):
+            new = concat([ds(0), ds(1)], dim=concat_dim)
+
+        assert concat_dim in old.dims
+        assert concat_dim in new.dims
+
+    def test_nested_merge_with_overlapping_values(self):
+        ds1 = Dataset({"a": ("x", [1, 2]), "x": [0, 1]})
+        ds2 = Dataset({"a": ("x", [2, 3]), "x": [1, 2]})
+        expected = Dataset({"a": ("x", [1, 2, 3]), "x": [0, 1, 2]})
+        with set_options(use_new_combine_kwarg_defaults=False):
+            with pytest.warns(
+                FutureWarning, match="will change from join='outer' to join='exact'"
+            ):
+                with pytest.warns(
+                    FutureWarning,
+                    match="will change from compat='no_conflicts' to compat='override'",
+                ):
+                    old = combine_nested([ds1, ds2], concat_dim=None)
+        with set_options(use_new_combine_kwarg_defaults=True):
+            with pytest.raises(ValueError, match="might be related to new default"):
+                combine_nested([ds1, ds2], concat_dim=None)
+
+        assert_identical(old, expected)
+
+    def test_nested_merge_with_nan_order_matters(self):
+        ds1 = Dataset({"x": 0})
+        ds2 = Dataset({"x": np.nan})
+        with set_options(use_new_combine_kwarg_defaults=False):
+            with pytest.warns(
+                FutureWarning,
+                match="will change from compat='no_conflicts' to compat='override'",
+            ):
+                old = combine_nested([ds1, ds2], concat_dim=None)
+        with set_options(use_new_combine_kwarg_defaults=True):
+            new = combine_nested([ds1, ds2], concat_dim=None)
+
+        assert_identical(ds1, old)
+        assert_identical(old, new)
+
+        with set_options(use_new_combine_kwarg_defaults=False):
+            with pytest.warns(
+                FutureWarning,
+                match="will change from compat='no_conflicts' to compat='override'",
+            ):
+                old = combine_nested([ds2, ds1], concat_dim=None)
+        with set_options(use_new_combine_kwarg_defaults=True):
+            new = combine_nested([ds2, ds1], concat_dim=None)
+
+        assert_identical(ds1, old)
+        with pytest.raises(AssertionError):
+            assert_identical(old, new)
+
+    def test_nested_merge_with_concat_dim_explicitly_provided(self):
+        # Test the issue reported in GH #1988
+        objs = [Dataset({"x": 0, "y": 1})]
+        dim = DataArray([100], name="baz", dims="baz")
+        expected = Dataset({"x": ("baz", [0]), "y": ("baz", [1])}, {"baz": [100]})
+
+        with set_options(use_new_combine_kwarg_defaults=False):
+            old = combine_nested(objs, concat_dim=dim)
+        with set_options(use_new_combine_kwarg_defaults=True):
+            new = combine_nested(objs, concat_dim=dim)
+
+        assert_identical(expected, old)
+        assert_identical(old, new)
+
+    def test_combine_nested_missing_data_new_dim(self):
+        # Your data includes "time" and "station" dimensions, and each year's
+        # data has a different set of stations.
+        datasets = [
+            Dataset({"a": ("x", [2, 3]), "x": [1, 2]}),
+            Dataset({"a": ("x", [1, 2]), "x": [0, 1]}),
+        ]
+        expected = Dataset(
+            {"a": (("t", "x"), [[np.nan, 2, 3], [1, 2, np.nan]])}, {"x": [0, 1, 2]}
+        )
+        with set_options(use_new_combine_kwarg_defaults=False):
+            with pytest.warns(
+                FutureWarning, match="will change from join='outer' to join='exact'"
+            ):
+                old = combine_nested(datasets, concat_dim="t")
+        with set_options(use_new_combine_kwarg_defaults=True):
+            with pytest.raises(ValueError, match="might be related to new default"):
+                combine_nested(datasets, concat_dim="t")
+            new = combine_nested(datasets, concat_dim="t", join="outer")
+
+        assert_identical(expected, old)
+        assert_identical(expected, new)
+
+    def test_combine_by_coords_multiple_variables(self):
+        objs = [Dataset({"x": [0], "y": [0]}), Dataset({"y": [1], "x": [1]})]
+        expected = Dataset({"x": [0, 1], "y": [0, 1]})
+
+        with set_options(use_new_combine_kwarg_defaults=False):
+            with pytest.warns(
+                FutureWarning, match="will change from join='outer' to join='exact'"
+            ):
+                old = combine_by_coords(objs)
+        with set_options(use_new_combine_kwarg_defaults=True):
+            with pytest.raises(ValueError, match="might be related to new default"):
+                combine_by_coords(objs)
+
+        assert_identical(old, expected)
+
+
 @requires_cftime
 def test_combine_by_coords_distant_cftime_dates():
     # Regression test for https://github.com/pydata/xarray/issues/3535
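The combine tests above exercise xarray's transition to new default keyword arguments: under `use_new_combine_kwarg_defaults=True`, `combine_nested` and `combine_by_coords` stop silently outer-joining and merging with `no_conflicts`, and callers are expected to spell the behaviour out. A minimal sketch of that explicit style, assuming only the option name and kwargs shown in the tests above:

    import numpy as np
    import xarray as xr

    datasets = [
        xr.Dataset({"a": ("x", [2, 3]), "x": [1, 2]}),
        xr.Dataset({"a": ("x", [1, 2]), "x": [0, 1]}),
    ]

    # Passing join="outer" explicitly keeps the historical behaviour (align the
    # differing "x" coordinates, filling the gaps with NaN) whether or not the
    # new combine defaults are active.
    with xr.set_options(use_new_combine_kwarg_defaults=True):
        combined = xr.combine_nested(datasets, concat_dim="t", join="outer")

    assert combined["a"].shape == (2, 3)
    assert bool(np.isnan(combined["a"]).any())
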
diff -pruN 2025.03.1-8/xarray/tests/test_computation.py 2025.10.1-1/xarray/tests/test_computation.py
--- 2025.03.1-8/xarray/tests/test_computation.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_computation.py	2025-10-10 10:38:05.000000000 +0000
@@ -926,7 +926,7 @@ def test_keep_attrs_strategies_dataarray
     compute_attrs = {
         "dim": lambda attrs, default: (attrs, default),
         "coord": lambda attrs, default: (default, attrs),
-    }.get(variant)
+    }[variant]
 
     dim_attrs, coord_attrs = compute_attrs(attrs, [{}, {}, {}])
 
@@ -1092,7 +1092,8 @@ def test_keep_attrs_strategies_dataset_v
         "data": lambda attrs, default: (attrs, default, default),
         "dim": lambda attrs, default: (default, attrs, default),
         "coord": lambda attrs, default: (default, default, attrs),
-    }.get(variant)
+    }[variant]
+
     data_attrs, dim_attrs, coord_attrs = compute_attrs(attrs, [{}, {}, {}])
 
     a = xr.Dataset(
@@ -1397,7 +1398,7 @@ def test_apply_dask_new_output_sizes_not
             da,
             input_core_dims=[["i", "j"]],
             output_core_dims=[["i", "j"]],
-            exclude_dims=set(("i", "j")),
+            exclude_dims={"i", "j"},
             dask="parallelized",
         )
 
@@ -1549,7 +1550,7 @@ def test_vectorize_exclude_dims_dask() -
 
 
 def test_corr_only_dataarray() -> None:
-    with pytest.raises(TypeError, match="Only xr.DataArray is supported"):
+    with pytest.raises(TypeError, match=r"Only xr.DataArray is supported"):
         xr.corr(xr.Dataset(), xr.Dataset())  # type: ignore[type-var]
 
 
@@ -1852,7 +1853,6 @@ def test_equally_weighted_cov_corr() ->
         coords={"time": pd.date_range("2000-01-01", freq="1D", periods=21)},
         dims=("a", "time", "x"),
     )
-    #
     assert_allclose(
         xr.cov(da, db, weights=None), xr.cov(da, db, weights=xr.DataArray(1))
     )
@@ -2014,9 +2014,8 @@ def test_output_wrong_dim_size() -> None
 
 @pytest.mark.parametrize("use_dask", [True, False])
 def test_dot(use_dask: bool) -> None:
-    if use_dask:
-        if not has_dask:
-            pytest.skip("test for dask.")
+    if use_dask and not has_dask:
+        pytest.skip("test for dask.")
 
     a = np.arange(30 * 4).reshape(30, 4)
     b = np.arange(30 * 4 * 5).reshape(30, 4, 5)
@@ -2146,9 +2145,8 @@ def test_dot(use_dask: bool) -> None:
 def test_dot_align_coords(use_dask: bool) -> None:
     # GH 3694
 
-    if use_dask:
-        if not has_dask:
-            pytest.skip("test for dask.")
+    if use_dask and not has_dask:
+        pytest.skip("test for dask.")
 
     a = np.arange(30 * 4).reshape(30, 4)
     b = np.arange(30 * 4 * 5).reshape(30, 4, 5)
@@ -2206,6 +2204,7 @@ def test_where() -> None:
 def test_where_attrs() -> None:
     cond = xr.DataArray([True, False], coords={"a": [0, 1]}, attrs={"attr": "cond_da"})
     cond["a"].attrs = {"attr": "cond_coord"}
+    input_cond = cond.copy()
     x = xr.DataArray([1, 1], coords={"a": [0, 1]}, attrs={"attr": "x_da"})
     x["a"].attrs = {"attr": "x_coord"}
     y = xr.DataArray([0, 0], coords={"a": [0, 1]}, attrs={"attr": "y_da"})
@@ -2216,6 +2215,22 @@ def test_where_attrs() -> None:
     expected = xr.DataArray([1, 0], coords={"a": [0, 1]}, attrs={"attr": "x_da"})
     expected["a"].attrs = {"attr": "x_coord"}
     assert_identical(expected, actual)
+    # Check also that input coordinate attributes weren't modified by reference
+    assert x["a"].attrs == {"attr": "x_coord"}
+    assert y["a"].attrs == {"attr": "y_coord"}
+    assert cond["a"].attrs == {"attr": "cond_coord"}
+    assert_identical(cond, input_cond)
+
+    # 3 DataArrays, drop attrs
+    actual = xr.where(cond, x, y, keep_attrs=False)
+    expected = xr.DataArray([1, 0], coords={"a": [0, 1]})
+    assert_identical(expected, actual)
+    assert_identical(expected.coords["a"], actual.coords["a"])
+    # Check also that input coordinate attributes weren't modified by reference
+    assert x["a"].attrs == {"attr": "x_coord"}
+    assert y["a"].attrs == {"attr": "y_coord"}
+    assert cond["a"].attrs == {"attr": "cond_coord"}
+    assert_identical(cond, input_cond)
 
     # x as a scalar, takes no attrs
     actual = xr.where(cond, 0, y, keep_attrs=True)
@@ -2627,3 +2642,14 @@ def test_complex_number_reduce(compute_b
     # Check that xarray doesn't call into numbagg, which doesn't compile for complex
     # numbers at the moment (but will when numba supports dynamic compilation)
     da.min()
+
+
+def test_fix() -> None:
+    val = 3.0
+    val_fixed = np.fix(val)
+
+    da = xr.DataArray([val])
+    expected = xr.DataArray([val_fixed])
+
+    actual = np.fix(da)
+    assert_identical(expected, actual)
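The new `test_fix` above checks that `np.fix` round-trips through `DataArray`. A minimal sketch of that behaviour, assuming nothing beyond what the test asserts:

    import numpy as np
    import xarray as xr

    da = xr.DataArray([-1.7, 0.2, 3.9], dims="x")

    # np.fix rounds toward zero elementwise and, per the test above, returns a
    # DataArray when handed one.
    rounded = np.fix(da)

    assert isinstance(rounded, xr.DataArray)
    np.testing.assert_array_equal(rounded.values, [-1.0, 0.0, 3.0])
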
diff -pruN 2025.03.1-8/xarray/tests/test_concat.py 2025.10.1-1/xarray/tests/test_concat.py
--- 2025.03.1-8/xarray/tests/test_concat.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_concat.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 from collections.abc import Callable
+from contextlib import AbstractContextManager, nullcontext
 from copy import deepcopy
 from typing import TYPE_CHECKING, Any, Literal
 
@@ -8,8 +9,8 @@ import numpy as np
 import pandas as pd
 import pytest
 
-from xarray import DataArray, Dataset, Variable, concat
-from xarray.core import dtypes
+from xarray import AlignmentError, DataArray, Dataset, Variable, concat, set_options
+from xarray.core import dtypes, types
 from xarray.core.coordinates import Coordinates
 from xarray.core.indexes import PandasIndex
 from xarray.structure import merge
@@ -21,7 +22,9 @@ from xarray.tests import (
     assert_equal,
     assert_identical,
     requires_dask,
+    requires_pyarrow,
 )
+from xarray.tests.indexes import XYIndex
 from xarray.tests.test_dataset import create_test_data
 
 if TYPE_CHECKING:
@@ -43,7 +46,7 @@ def create_concat_datasets(
                 ["x", "y", "day"],
                 rng.standard_normal(size=(1, 4, 2)),
             )
-            data_vars = {v: data_tuple for v in variables}
+            data_vars = dict.fromkeys(variables, data_tuple)
             result.append(
                 Dataset(
                     data_vars=data_vars,
@@ -59,7 +62,7 @@ def create_concat_datasets(
                 ["x", "y"],
                 rng.standard_normal(size=(1, 4)),
             )
-            data_vars = {v: data_tuple for v in variables}
+            data_vars = dict.fromkeys(variables, data_tuple)
             result.append(
                 Dataset(
                     data_vars=data_vars,
@@ -133,9 +136,9 @@ def test_concat_compat() -> None:
     for var in ["has_x", "no_x_y"]:
         assert "y" not in result[var].dims and "y" not in result[var].coords
     with pytest.raises(ValueError, match=r"'q' not present in all datasets"):
-        concat([ds1, ds2], dim="q")
+        concat([ds1, ds2], dim="q", data_vars="all", join="outer")
     with pytest.raises(ValueError, match=r"'q' not present in all datasets"):
-        concat([ds2, ds1], dim="q")
+        concat([ds2, ds1], dim="q", data_vars="all", join="outer")
 
 
 def test_concat_missing_var() -> None:
@@ -154,19 +157,20 @@ def test_concat_missing_var() -> None:
     assert_identical(actual, expected)
 
 
-def test_concat_categorical() -> None:
+@pytest.mark.parametrize("var", ["var4", pytest.param("var5", marks=requires_pyarrow)])
+def test_concat_extension_array(var) -> None:
     data1 = create_test_data(use_extension_array=True)
     data2 = create_test_data(use_extension_array=True)
     concatenated = concat([data1, data2], dim="dim1")
-    assert (
-        concatenated["var4"]
-        == type(data2["var4"].variable.data.array)._concat_same_type(
+    assert pd.Series(
+        concatenated[var]
+        == type(data2[var].variable.data)._concat_same_type(
             [
-                data1["var4"].variable.data.array,
-                data2["var4"].variable.data.array,
+                data1[var].variable.data,
+                data2[var].variable.data,
             ]
         )
-    ).all()
+    ).all()  # need to wrap in series because pyarrow bool does not support `all`
 
 
 def test_concat_missing_multiple_consecutive_var() -> None:
@@ -214,8 +218,12 @@ def test_concat_second_empty() -> None:
     actual = concat([ds1, ds2], dim="y", coords="all")
     assert_identical(actual, expected)
 
+
+def test_concat_second_empty_with_scalar_data_var_only_on_first() -> None:
     # Check concatenating scalar data_var only present in ds1
-    ds1["b"] = 0.1
+    ds1 = Dataset(data_vars={"a": ("y", [0.1]), "b": 0.1}, coords={"x": 0.1})
+    ds2 = Dataset(coords={"x": 0.1})
+
     expected = Dataset(
         data_vars={"a": ("y", [0.1, np.nan]), "b": ("y", [0.1, np.nan])},
         coords={"x": ("y", [0.1, 0.1])},
@@ -226,7 +234,9 @@ def test_concat_second_empty() -> None:
     expected = Dataset(
         data_vars={"a": ("y", [0.1, np.nan]), "b": 0.1}, coords={"x": 0.1}
     )
-    actual = concat([ds1, ds2], dim="y", coords="different", data_vars="different")
+    actual = concat(
+        [ds1, ds2], dim="y", coords="different", data_vars="different", compat="equals"
+    )
     assert_identical(actual, expected)
 
 
@@ -263,10 +273,13 @@ def test_concat_multiple_datasets_missin
         "cloud_cover",
     ]
 
+    # must specify if concat_dim='day' is not part of the vars
+    kwargs = {"data_vars": "all"} if not include_day else {}
+
     datasets = create_concat_datasets(
         len(vars_to_drop), seed=123, include_day=include_day
     )
-    expected = concat(datasets, dim="day")
+    expected = concat(datasets, dim="day", **kwargs)  # type: ignore[call-overload]
 
     for i, name in enumerate(vars_to_drop):
         if include_day:
@@ -279,8 +292,7 @@ def test_concat_multiple_datasets_missin
         ds.drop_vars(varname)
         for ds, varname in zip(datasets, vars_to_drop, strict=True)
     ]
-
-    actual = concat(datasets, dim="day")
+    actual = concat(datasets, dim="day", **kwargs)  # type: ignore[call-overload]
 
     assert list(actual.data_vars.keys()) == [
         "pressure",
@@ -490,8 +502,8 @@ class TestConcatDataset:
         "dim,data", [["dim1", True], ["dim2", False]], indirect=["data"]
     )
     def test_concat_simple(self, data: Dataset, dim, coords) -> None:
-        datasets = [g for _, g in data.groupby(dim, squeeze=False)]
-        assert_identical(data, concat(datasets, dim, coords=coords))
+        datasets = [g for _, g in data.groupby(dim)]
+        assert_identical(data, concat(datasets, dim, coords=coords, compat="equals"))
 
     def test_concat_merge_variables_present_in_some_datasets(
         self, data: Dataset
@@ -512,7 +524,7 @@ class TestConcatDataset:
         assert_identical(expected, actual)
 
         # expand foo
-        actual = concat([data0, data1], "dim1")
+        actual = concat([data0, data1], "dim1", data_vars="all")
         foo = np.ones((8, 10), dtype=data1.foo.dtype) * np.nan
         foo[3:] = data1.foo.values[None, ...]
         expected = data.copy().assign(foo=(["dim1", "bar"], foo))
@@ -534,9 +546,11 @@ class TestConcatDataset:
         data = data.copy(deep=True)
         # make sure the coords argument behaves as expected
         data.coords["extra"] = ("dim4", np.arange(3))
-        datasets = [g.squeeze() for _, g in data.groupby(dim, squeeze=False)]
+        datasets = [g for _, g in data.groupby(dim)]
 
-        actual = concat(datasets, data[dim], coords=coords)
+        actual = concat(
+            datasets, data[dim], coords=coords, data_vars="all", compat="equals"
+        )
         if coords == "all":
             expected = np.array([data["extra"].values for _ in range(data.sizes[dim])])
             assert_array_equal(actual["extra"].values, expected)
@@ -568,41 +582,72 @@ class TestConcatDataset:
         actual = concat(objs, dim="x", data_vars="minimal")
         assert_identical(data, actual)
 
-    def test_concat_data_vars(self) -> None:
+    @pytest.mark.parametrize("data_vars", ["minimal", "different", "all", [], ["foo"]])
+    def test_concat_data_vars(self, data_vars) -> None:
         data = Dataset({"foo": ("x", np.random.randn(10))})
         objs: list[Dataset] = [data.isel(x=slice(5)), data.isel(x=slice(5, None))]
-        for data_vars in ["minimal", "different", "all", [], ["foo"]]:
-            actual = concat(objs, dim="x", data_vars=data_vars)
-            assert_identical(data, actual)
+        actual = concat(objs, dim="x", data_vars=data_vars, compat="equals")
+        assert_identical(data, actual)
 
-    def test_concat_coords(self):
-        # TODO: annotating this func fails
+    @pytest.mark.parametrize("coords", ["different", "all", ["c"]])
+    def test_concat_coords(self, coords) -> None:
         data = Dataset({"foo": ("x", np.random.randn(10))})
         expected = data.assign_coords(c=("x", [0] * 5 + [1] * 5))
         objs = [
             data.isel(x=slice(5)).assign_coords(c=0),
             data.isel(x=slice(5, None)).assign_coords(c=1),
         ]
-        for coords in ["different", "all", ["c"]]:
+        if coords == "different":
+            actual = concat(objs, dim="x", coords=coords, compat="equals")
+        else:
             actual = concat(objs, dim="x", coords=coords)
-            assert_identical(expected, actual)
-        for coords in ["minimal", []]:
-            with pytest.raises(merge.MergeError, match="conflicting values"):
-                concat(objs, dim="x", coords=coords)
+        assert_identical(expected, actual)
+
+    @pytest.mark.parametrize("coords", ["minimal", []])
+    def test_concat_coords_raises_merge_error(self, coords) -> None:
+        data = Dataset({"foo": ("x", np.random.randn(10))})
+        objs = [
+            data.isel(x=slice(5)).assign_coords(c=0),
+            data.isel(x=slice(5, None)).assign_coords(c=1),
+        ]
+        with pytest.raises(merge.MergeError, match="conflicting values"):
+            concat(objs, dim="x", coords=coords, compat="equals")
 
-    def test_concat_constant_index(self):
-        # TODO: annotating this func fails
+    @pytest.mark.parametrize("data_vars", ["different", "all", ["foo"]])
+    def test_concat_constant_index(self, data_vars) -> None:
         # GH425
         ds1 = Dataset({"foo": 1.5}, {"y": 1})
         ds2 = Dataset({"foo": 2.5}, {"y": 1})
         expected = Dataset({"foo": ("y", [1.5, 2.5]), "y": [1, 1]})
-        for mode in ["different", "all", ["foo"]]:
-            actual = concat([ds1, ds2], "y", data_vars=mode)
-            assert_identical(expected, actual)
-        with pytest.raises(merge.MergeError, match="conflicting values"):
-            # previously dim="y", and raised error which makes no sense.
-            # "foo" has dimension "y" so minimal should concatenate it?
-            concat([ds1, ds2], "new_dim", data_vars="minimal")
+        if data_vars == "different":
+            actual = concat([ds1, ds2], "y", data_vars=data_vars, compat="equals")
+        else:
+            actual = concat([ds1, ds2], "y", data_vars=data_vars)
+        assert_identical(expected, actual)
+
+    def test_concat_constant_index_None(self) -> None:
+        ds1 = Dataset({"foo": 1.5}, {"y": 1})
+        ds2 = Dataset({"foo": 2.5}, {"y": 1})
+        actual = concat([ds1, ds2], "new_dim", data_vars=None, compat="equals")
+        expected = Dataset(
+            {"foo": ("new_dim", [1.5, 2.5])},
+            coords={"y": 1},
+        )
+        assert_identical(actual, expected)
+
+    def test_concat_constant_index_minimal(self) -> None:
+        ds1 = Dataset({"foo": 1.5}, {"y": 1})
+        ds2 = Dataset({"foo": 2.5}, {"y": 1})
+
+        with set_options(use_new_combine_kwarg_defaults=False):
+            with pytest.raises(merge.MergeError, match="conflicting values"):
+                concat([ds1, ds2], dim="new_dim", data_vars="minimal")
+
+        with set_options(use_new_combine_kwarg_defaults=True):
+            with pytest.raises(
+                ValueError, match="data_vars='minimal' and coords='minimal'"
+            ):
+                concat([ds1, ds2], dim="new_dim", data_vars="minimal")
 
     def test_concat_size0(self) -> None:
         data = create_test_data()
@@ -616,7 +661,7 @@ class TestConcatDataset:
     def test_concat_autoalign(self) -> None:
         ds1 = Dataset({"foo": DataArray([1, 2], coords=[("x", [1, 2])])})
         ds2 = Dataset({"foo": DataArray([1, 2], coords=[("x", [1, 3])])})
-        actual = concat([ds1, ds2], "y")
+        actual = concat([ds1, ds2], "y", data_vars="all", join="outer")
         expected = Dataset(
             {
                 "foo": DataArray(
@@ -628,8 +673,7 @@ class TestConcatDataset:
         )
         assert_identical(expected, actual)
 
-    def test_concat_errors(self):
-        # TODO: annotating this func fails
+    def test_concat_errors(self) -> None:
         data = create_test_data()
         split_data = [data.isel(dim1=slice(3)), data.isel(dim1=slice(3, None))]
 
@@ -659,7 +703,7 @@ class TestConcatDataset:
         assert_identical(data, concat([data0, data1], "dim1", compat="equals"))
 
         with pytest.raises(ValueError, match=r"compat.* invalid"):
-            concat(split_data, "dim1", compat="foobar")
+            concat(split_data, "dim1", compat="foobar")  # type: ignore[call-overload]
 
         with pytest.raises(ValueError, match=r"compat.* invalid"):
             concat(split_data, "dim1", compat="minimal")
@@ -705,9 +749,9 @@ class TestConcatDataset:
         with pytest.raises(ValueError, match=r"cannot align.*exact.*dimensions.*'y'"):
             actual = concat([ds1, ds2], join="exact", dim="x")
 
-        for join in expected:
+        for join, expected_item in expected.items():
             actual = concat([ds1, ds2], join=join, dim="x")
-            assert_equal(actual, expected[join])
+            assert_equal(actual, expected_item)
 
         # regression test for #3681
         actual = concat(
@@ -844,8 +888,7 @@ class TestConcatDataset:
 
             assert_identical(actual, expected)
 
-    def test_concat_promote_shape(self) -> None:
-        # mixed dims within variables
+    def test_concat_promote_shape_with_mixed_dims_within_variables(self) -> None:
         objs = [Dataset({}, {"x": 0}), Dataset({"x": [1]})]
         actual = concat(objs, "x")
         expected = Dataset({"x": [0, 1]})
@@ -855,25 +898,30 @@ class TestConcatDataset:
         actual = concat(objs, "x")
         assert_identical(actual, expected)
 
-        # mixed dims between variables
+    def test_concat_promote_shape_with_mixed_dims_between_variables(self) -> None:
         objs = [Dataset({"x": [2], "y": 3}), Dataset({"x": [4], "y": 5})]
-        actual = concat(objs, "x")
+        actual = concat(objs, "x", data_vars="all")
         expected = Dataset({"x": [2, 4], "y": ("x", [3, 5])})
         assert_identical(actual, expected)
 
-        # mixed dims in coord variable
+    def test_concat_promote_shape_with_mixed_dims_in_coord_variable(self) -> None:
         objs = [Dataset({"x": [0]}, {"y": -1}), Dataset({"x": [1]}, {"y": ("x", [-2])})]
         actual = concat(objs, "x")
         expected = Dataset({"x": [0, 1]}, {"y": ("x", [-1, -2])})
         assert_identical(actual, expected)
 
-        # scalars with mixed lengths along concat dim -- values should repeat
+    def test_concat_promote_shape_for_scalars_with_mixed_lengths_along_concat_dim(
+        self,
+    ) -> None:
+        # values should repeat
         objs = [Dataset({"x": [0]}, {"y": -1}), Dataset({"x": [1, 2]}, {"y": -2})]
-        actual = concat(objs, "x")
+        actual = concat(objs, "x", coords="different", compat="equals")
         expected = Dataset({"x": [0, 1, 2]}, {"y": ("x", [-1, -2, -2])})
         assert_identical(actual, expected)
+        actual = concat(objs, "x", coords="all")
+        assert_identical(actual, expected)
 
-        # broadcast 1d x 1d -> 2d
+    def test_concat_promote_shape_broadcast_1d_x_1d_goes_to_2d(self) -> None:
         objs = [
             Dataset({"z": ("x", [-1])}, {"x": [0], "y": [0]}),
             Dataset({"z": ("y", [1])}, {"x": [1], "y": [0]}),
@@ -882,6 +930,7 @@ class TestConcatDataset:
         expected = Dataset({"z": (("x", "y"), [[-1], [1]])}, {"x": [0, 1], "y": [0]})
         assert_identical(actual, expected)
 
+    def test_concat_promote_shape_with_scalar_coordinates(self) -> None:
         # regression GH6384
         objs = [
             Dataset({}, {"x": pd.Interval(-1, 0, closed="right")}),
@@ -898,6 +947,7 @@ class TestConcatDataset:
         )
         assert_identical(actual, expected)
 
+    def test_concat_promote_shape_with_coordinates_of_particular_dtypes(self) -> None:
         # regression GH6416 (coord dtype) and GH6434
         time_data1 = np.array(["2022-01-01", "2022-02-01"], dtype="datetime64[ns]")
         time_data2 = np.array("2022-03-01", dtype="datetime64[ns]")
@@ -931,14 +981,14 @@ class TestConcatDataset:
         objs = [Dataset({"x": 0}), Dataset({"x": 1})]
         coord = Variable("y", [3, 4], attrs={"foo": "bar"})
         expected = Dataset({"x": ("y", [0, 1]), "y": coord})
-        actual = concat(objs, coord)
+        actual = concat(objs, coord, data_vars="all")
         assert_identical(actual, expected)
 
     def test_concat_dim_is_dataarray(self) -> None:
         objs = [Dataset({"x": 0}), Dataset({"x": 1})]
         coord = DataArray([3, 4], dims="y", attrs={"foo": "bar"})
         expected = Dataset({"x": ("y", [0, 1]), "y": coord})
-        actual = concat(objs, coord)
+        actual = concat(objs, coord, data_vars="all")
         assert_identical(actual, expected)
 
     def test_concat_multiindex(self) -> None:
@@ -965,8 +1015,8 @@ class TestConcatDataset:
     @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"a": 2, "b": 1}])
     def test_concat_fill_value(self, fill_value) -> None:
         datasets = [
-            Dataset({"a": ("x", [2, 3]), "b": ("x", [-2, 1]), "x": [1, 2]}),
-            Dataset({"a": ("x", [1, 2]), "b": ("x", [3, -1]), "x": [0, 1]}),
+            Dataset({"a": ("x", [2, 3]), "b": ("x", [-2, 1])}, {"x": [1, 2]}),
+            Dataset({"a": ("x", [1, 2]), "b": ("x", [3, -1])}, {"x": [0, 1]}),
         ]
         if fill_value == dtypes.NA:
             # if we supply the default, we expect the missing value for a
@@ -984,7 +1034,9 @@ class TestConcatDataset:
             },
             {"x": [0, 1, 2]},
         )
-        actual = concat(datasets, dim="t", fill_value=fill_value)
+        actual = concat(
+            datasets, dim="t", fill_value=fill_value, data_vars="all", join="outer"
+        )
         assert_identical(actual, expected)
 
     @pytest.mark.parametrize("dtype", [str, bytes])
@@ -1006,7 +1058,7 @@ class TestConcatDataset:
                 "x2": np.array(["c", "d"], dtype=dtype),
             }
         )
-        actual = concat([da1, da2], dim=dim)
+        actual = concat([da1, da2], dim=dim, join="outer")
 
         assert np.issubdtype(actual.x2.dtype, dtype)
 
@@ -1031,7 +1083,7 @@ class TestConcatDataset:
         assert combined.indexes == {}
 
         # should not raise on stack
-        combined = concat(datasets, dim="z")
+        combined = concat(datasets, dim="z", data_vars="all")
         assert combined["a"].shape == (2, 3, 3)
         assert combined["a"].dims == ("z", "x", "y")
 
@@ -1096,11 +1148,15 @@ class TestConcatDataArray:
         stacked = concat(grouped, pd.Index(ds["x"], name="x"))
         assert_identical(foo, stacked)
 
-        actual2 = concat([foo[0], foo[1]], pd.Index([0, 1])).reset_coords(drop=True)
+        actual2 = concat(
+            [foo.isel(x=0), foo.isel(x=1)], pd.Index([0, 1]), coords="all"
+        ).reset_coords(drop=True)
         expected = foo[:2].rename({"x": "concat_dim"})
         assert_identical(expected, actual2)
 
-        actual3 = concat([foo[0], foo[1]], [0, 1]).reset_coords(drop=True)
+        actual3 = concat(
+            [foo.isel(x=0), foo.isel(x=1)], [0, 1], coords="all"
+        ).reset_coords(drop=True)
         expected = foo[:2].rename({"x": "concat_dim"})
         assert_identical(expected, actual3)
 
@@ -1108,7 +1164,7 @@ class TestConcatDataArray:
             concat([foo, bar], dim="w", compat="identical")
 
         with pytest.raises(ValueError, match=r"not a valid argument"):
-            concat([foo, bar], dim="w", data_vars="minimal")
+            concat([foo, bar], dim="w", data_vars="different")
 
     def test_concat_encoding(self) -> None:
         # Regression test for GH1297
@@ -1182,7 +1238,7 @@ class TestConcatDataArray:
             dims=["y", "x"],
             coords={"x": [1, 2, 3]},
         )
-        actual = concat((foo, bar), dim="y", fill_value=fill_value)
+        actual = concat((foo, bar), dim="y", fill_value=fill_value, join="outer")
         assert_identical(actual, expected)
 
     def test_concat_join_kwarg(self) -> None:
@@ -1217,9 +1273,9 @@ class TestConcatDataArray:
         with pytest.raises(ValueError, match=r"cannot align.*exact.*dimensions.*'y'"):
             actual = concat([ds1, ds2], join="exact", dim="x")
 
-        for join in expected:
+        for join, expected_item in expected.items():
             actual = concat([ds1, ds2], join=join, dim="x")
-            assert_equal(actual, expected[join].to_dataarray())
+            assert_equal(actual, expected_item.to_dataarray())
 
     def test_concat_combine_attrs_kwarg(self) -> None:
         da1 = DataArray([0], coords=[("x", [0])], attrs={"b": 42})
@@ -1241,9 +1297,9 @@ class TestConcatDataArray:
             da3.attrs["b"] = 44
             actual = concat([da1, da3], dim="x", combine_attrs="no_conflicts")
 
-        for combine_attrs in expected:
+        for combine_attrs, expected_item in expected.items():
             actual = concat([da1, da2], dim="x", combine_attrs=combine_attrs)
-            assert_identical(actual, expected[combine_attrs])
+            assert_identical(actual, expected_item)
 
     @pytest.mark.parametrize("dtype", [str, bytes])
     @pytest.mark.parametrize("dim", ["x1", "x2"])
@@ -1260,7 +1316,7 @@ class TestConcatDataArray:
             dims=["x1", "x2"],
             coords={"x1": np.array([1, 2]), "x2": np.array(["c", "d"], dtype=dtype)},
         )
-        actual = concat([da1, da2], dim=dim)
+        actual = concat([da1, da2], dim=dim, join="outer")
 
         assert np.issubdtype(actual.x2.dtype, dtype)
 
@@ -1285,16 +1341,17 @@ def test_concat_attrs_first_variable(att
     assert concat_attrs == attr1
 
 
-def test_concat_merge_single_non_dim_coord():
-    # TODO: annotating this func fails
+def test_concat_merge_single_non_dim_coord() -> None:
     da1 = DataArray([1, 2, 3], dims="x", coords={"x": [1, 2, 3], "y": 1})
     da2 = DataArray([4, 5, 6], dims="x", coords={"x": [4, 5, 6]})
 
     expected = DataArray(range(1, 7), dims="x", coords={"x": range(1, 7), "y": 1})
 
-    for coords in ["different", "minimal"]:
-        actual = concat([da1, da2], "x", coords=coords)
-        assert_identical(actual, expected)
+    actual = concat([da1, da2], "x", coords="minimal", compat="override")
+    assert_identical(actual, expected)
+
+    actual = concat([da1, da2], "x", coords="different", compat="equals")
+    assert_identical(actual, expected)
 
     with pytest.raises(ValueError, match=r"'y' not present in all datasets."):
         concat([da1, da2], dim="x", coords="all")
@@ -1302,9 +1359,12 @@ def test_concat_merge_single_non_dim_coo
     da1 = DataArray([1, 2, 3], dims="x", coords={"x": [1, 2, 3], "y": 1})
     da2 = DataArray([4, 5, 6], dims="x", coords={"x": [4, 5, 6]})
     da3 = DataArray([7, 8, 9], dims="x", coords={"x": [7, 8, 9], "y": 1})
-    for coords in ["different", "all"]:
-        with pytest.raises(ValueError, match=r"'y' not present in all datasets"):
-            concat([da1, da2, da3], dim="x", coords=coords)
+
+    with pytest.raises(ValueError, match=r"'y' not present in all datasets"):
+        concat([da1, da2, da3], dim="x", coords="all")
+
+    with pytest.raises(ValueError, match=r"'y' not present in all datasets"):
+        concat([da1, da2, da3], dim="x", coords="different", compat="equals")
 
 
 def test_concat_preserve_coordinate_order() -> None:
@@ -1379,3 +1439,150 @@ def test_concat_index_not_same_dim() ->
         match=r"Cannot concatenate along dimension 'x' indexes with dimensions.*",
     ):
         concat([ds1, ds2], dim="x")
+
+
+class TestNewDefaults:
+    def test_concat_second_empty_with_scalar_data_var_only_on_first(self) -> None:
+        ds1 = Dataset(data_vars={"a": ("y", [0.1]), "b": 0.1}, coords={"x": 0.1})
+        ds2 = Dataset(coords={"x": 0.1})
+
+        expected = Dataset(
+            data_vars={"a": ("y", [0.1, np.nan]), "b": 0.1}, coords={"x": 0.1}
+        )
+        with set_options(use_new_combine_kwarg_defaults=False):
+            with pytest.warns(
+                FutureWarning,
+                match="will change from compat='equals' to compat='override'",
+            ):
+                actual = concat(
+                    [ds1, ds2], dim="y", coords="different", data_vars="different"
+                )
+                assert_identical(actual, expected)
+        with set_options(use_new_combine_kwarg_defaults=True):
+            with pytest.raises(ValueError, match="might be related to new default"):
+                concat([ds1, ds2], dim="y", coords="different", data_vars="different")
+
+    def test_concat_multiple_datasets_missing_vars(self) -> None:
+        vars_to_drop = [
+            "temperature",
+            "pressure",
+            "humidity",
+            "precipitation",
+            "cloud_cover",
+        ]
+
+        datasets = create_concat_datasets(
+            len(vars_to_drop), seed=123, include_day=False
+        )
+        # set up the test data
+        datasets = [
+            ds.drop_vars(varname)
+            for ds, varname in zip(datasets, vars_to_drop, strict=True)
+        ]
+        with set_options(use_new_combine_kwarg_defaults=False):
+            old = concat(datasets, dim="day")
+        with set_options(use_new_combine_kwarg_defaults=True):
+            new = concat(datasets, dim="day")
+        assert_identical(old, new)
+
+    @pytest.mark.parametrize("coords", ["different", "minimal", "all"])
+    def test_concat_coords_kwarg(
+        self, coords: Literal["all", "minimal", "different"]
+    ) -> None:
+        data = create_test_data().drop_dims("dim3")
+
+        # make sure the coords argument behaves as expected
+        data.coords["extra"] = ("dim4", np.arange(3))
+        datasets = [g for _, g in data.groupby("dim1")]
+
+        with set_options(use_new_combine_kwarg_defaults=False):
+            expectation: AbstractContextManager = (
+                pytest.warns(
+                    FutureWarning,
+                    match="will change from compat='equals' to compat='override'",
+                )
+                if coords == "different"
+                else nullcontext()
+            )
+            with expectation:
+                old = concat(datasets, data["dim1"], coords=coords)
+
+        with set_options(use_new_combine_kwarg_defaults=True):
+            if coords == "different":
+                with pytest.raises(ValueError):
+                    concat(datasets, data["dim1"], coords=coords)
+            else:
+                new = concat(datasets, data["dim1"], coords=coords)
+                assert_identical(old, new)
+
+    def test_concat_promote_shape_for_scalars_with_mixed_lengths_along_concat_dim(
+        self,
+    ) -> None:
+        # values should repeat
+        objs = [Dataset({"x": [0]}, {"y": -1}), Dataset({"x": [1, 2]}, {"y": -2})]
+        expected = Dataset({"x": [0, 1, 2]}, {"y": ("x", [-1, -2, -2])})
+        with set_options(use_new_combine_kwarg_defaults=False):
+            with pytest.warns(
+                FutureWarning,
+                match="will change from coords='different' to coords='minimal'",
+            ):
+                old = concat(objs, "x")
+                assert_identical(old, expected)
+        with set_options(use_new_combine_kwarg_defaults=True):
+            new = concat(objs, "x")
+            with pytest.raises(AssertionError):
+                assert_identical(new, old)
+            with pytest.raises(ValueError, match="might be related to new default"):
+                concat(objs, "x", coords="different")
+            with pytest.raises(merge.MergeError, match="conflicting values"):
+                concat(objs, "x", compat="equals")
+
+            new = concat(objs, "x", coords="different", compat="equals")
+            assert_identical(old, new)
+
+
+def test_concat_multi_dim_index() -> None:
+    ds1 = (
+        Dataset(
+            {"foo": (("x", "y"), np.random.randn(2, 2))},
+            coords={"x": [1, 2], "y": [3, 4]},
+        )
+        .drop_indexes(["x", "y"])
+        .set_xindex(["x", "y"], XYIndex)
+    )
+    ds2 = (
+        Dataset(
+            {"foo": (("x", "y"), np.random.randn(2, 2))},
+            coords={"x": [1, 2], "y": [5, 6]},
+        )
+        .drop_indexes(["x", "y"])
+        .set_xindex(["x", "y"], XYIndex)
+    )
+
+    expected = (
+        Dataset(
+            {
+                "foo": (
+                    ("x", "y"),
+                    np.concatenate([ds1.foo.data, ds2.foo.data], axis=-1),
+                )
+            },
+            coords={"x": [1, 2], "y": [3, 4, 5, 6]},
+        )
+        .drop_indexes(["x", "y"])
+        .set_xindex(["x", "y"], XYIndex)
+    )
+    # note: missing 'override'
+    joins: list[types.JoinOptions] = ["inner", "outer", "exact", "left", "right"]
+    for join in joins:
+        actual = concat([ds1, ds2], dim="y", join=join)
+        assert_identical(actual, expected, check_default_indexes=False)
+
+    with pytest.raises(AlignmentError):
+        actual = concat([ds1, ds2], dim="x", join="exact")
+
+    # TODO: fix these, or raise a better error message
+    with pytest.raises(AssertionError):
+        joins_lr: list[types.JoinOptions] = ["left", "right"]
+        for join in joins_lr:
+            actual = concat([ds1, ds2], dim="x", join=join)
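Many `concat` call sites in the tests above now pass `data_vars`, `coords`, `join` and `compat` explicitly because the defaults are being tightened (see `TestNewDefaults`). A minimal sketch of the explicit style, mirroring `test_concat_autoalign` above:

    import xarray as xr

    ds1 = xr.Dataset({"foo": ("x", [1.0, 2.0])}, coords={"x": [1, 2]})
    ds2 = xr.Dataset({"foo": ("x", [3.0, 4.0])}, coords={"x": [1, 3]})

    # Spelling out data_vars="all" and join="outer" reproduces the historical
    # behaviour: "x" is outer-joined to [1, 2, 3] and "foo" is stacked along the
    # new "y" dimension, with NaN where a dataset has no value.
    combined = xr.concat([ds1, ds2], dim="y", data_vars="all", join="outer")

    assert combined.sizes["y"] == 2
    assert combined.sizes["x"] == 3
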
diff -pruN 2025.03.1-8/xarray/tests/test_conventions.py 2025.10.1-1/xarray/tests/test_conventions.py
--- 2025.03.1-8/xarray/tests/test_conventions.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_conventions.py	2025-10-10 10:38:05.000000000 +0000
@@ -37,6 +37,10 @@ class TestBoolTypeArray:
         assert bx.dtype == bool
         assert_array_equal(bx, np.array([True, False, True, True, False], dtype=bool))
 
+        x = np.array([[1, 0, 1], [0, 1, 0]], dtype="i1")
+        bx = coding.variables.BoolTypeArray(x)
+        assert_array_equal(bx.transpose((1, 0)), x.transpose((1, 0)))
+
 
 class TestNativeEndiannessArray:
     def test(self) -> None:
@@ -47,6 +51,11 @@ class TestNativeEndiannessArray:
         assert a.dtype == expected[:].dtype
         assert_array_equal(a, expected)
 
+        y = np.arange(6, dtype=">i8").reshape((2, 3))
+        b = coding.variables.NativeEndiannessArray(y)
+        expected2 = np.arange(6, dtype="int64").reshape((2, 3))
+        assert_array_equal(b.transpose((1, 0)), expected2.transpose((1, 0)))
+
 
 def test_decode_cf_with_conflicting_fill_missing_value() -> None:
     expected = Variable(["t"], [np.nan, np.nan, 2], {"units": "foobar"})
@@ -131,8 +140,17 @@ class TestEncodeCFVariable:
     def test_missing_fillvalue(self) -> None:
         v = Variable(["x"], np.array([np.nan, 1, 2, 3]))
         v.encoding = {"dtype": "int16"}
-        with pytest.warns(Warning, match="floating point data as an integer"):
+        # Expect both the SerializationWarning and the RuntimeWarning from numpy
+        with pytest.warns(Warning) as record:
             conventions.encode_cf_variable(v)
+        # Check we got the expected warnings
+        warning_messages = [str(w.message) for w in record]
+        assert any(
+            "floating point data as an integer" in msg for msg in warning_messages
+        )
+        assert any(
+            "invalid value encountered in cast" in msg for msg in warning_messages
+        )
 
     def test_multidimensional_coordinates(self) -> None:
         # regression test for GH1763
@@ -340,20 +358,20 @@ class TestDecodeCF:
         )
 
         original.temp.attrs["grid_mapping"] = "crs: x y"
-        vars, attrs, coords = conventions.decode_cf_variables(
+        _vars, _attrs, coords = conventions.decode_cf_variables(
             original.variables, {}, decode_coords="all"
         )
         assert coords == {"lat", "lon", "crs"}
 
         original.temp.attrs["grid_mapping"] = "crs: x y crs2: lat lon"
-        vars, attrs, coords = conventions.decode_cf_variables(
+        _vars, _attrs, coords = conventions.decode_cf_variables(
             original.variables, {}, decode_coords="all"
         )
         assert coords == {"lat", "lon", "crs", "crs2"}
 
         # stray colon
         original.temp.attrs["grid_mapping"] = "crs: x y crs2 : lat lon"
-        vars, attrs, coords = conventions.decode_cf_variables(
+        _vars, _attrs, coords = conventions.decode_cf_variables(
             original.variables, {}, decode_coords="all"
         )
         assert coords == {"lat", "lon", "crs", "crs2"}
@@ -364,14 +382,14 @@ class TestDecodeCF:
 
         del original.temp.attrs["grid_mapping"]
         original.temp.attrs["formula_terms"] = "A: lat D: lon E: crs2"
-        vars, attrs, coords = conventions.decode_cf_variables(
+        _vars, _attrs, coords = conventions.decode_cf_variables(
             original.variables, {}, decode_coords="all"
         )
         assert coords == {"lat", "lon", "crs2"}
 
         original.temp.attrs["formula_terms"] = "A: lat lon D: crs E: crs2"
         with pytest.warns(UserWarning, match="has malformed content"):
-            vars, attrs, coords = conventions.decode_cf_variables(
+            _vars, _attrs, coords = conventions.decode_cf_variables(
                 original.variables, {}, decode_coords="all"
             )
             assert coords == {"lat", "lon", "crs", "crs2"}
@@ -555,10 +573,10 @@ class TestDecodeCF:
 
 
 class CFEncodedInMemoryStore(WritableCFDataStore, InMemoryDataStore):
-    def encode_variable(self, var):
+    def encode_variable(self, var, name=None):
         """encode one variable"""
         coder = coding.strings.EncodedStringCoder(allows_unicode=True)
-        var = coder.encode(var)
+        var = coder.encode(var, name=name)
         return var
 
 
@@ -597,6 +615,10 @@ class TestCFEncodedDataStore(CFEncodedBa
         # CFEncodedInMemoryStore doesn't support explicit string encodings.
         pass
 
+    def test_encoding_unlimited_dims(self) -> None:
+        # CFEncodedInMemoryStore doesn't support unlimited_dims.
+        pass
+
 
 class TestDecodeCFVariableWithArrayUnits:
     def test_decode_cf_variable_with_array_units(self) -> None:
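The reworked `test_missing_fillvalue` above collects every warning with `pytest.warns(Warning) as record` and then inspects the messages, because two different warnings are now expected from a single call. A minimal sketch of that pattern with a hypothetical `noisy()` helper (not part of xarray):

    import warnings

    import pytest

    def noisy() -> None:
        warnings.warn("floating point data as an integer", UserWarning)
        warnings.warn("invalid value encountered in cast", RuntimeWarning)

    # pytest.warns(Warning) records every warning raised inside the block, so
    # each expected message can be checked individually afterwards.
    with pytest.warns(Warning) as record:
        noisy()

    messages = [str(w.message) for w in record]
    assert any("floating point data as an integer" in m for m in messages)
    assert any("invalid value encountered in cast" in m for m in messages)
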
diff -pruN 2025.03.1-8/xarray/tests/test_coordinate_transform.py 2025.10.1-1/xarray/tests/test_coordinate_transform.py
--- 2025.03.1-8/xarray/tests/test_coordinate_transform.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_coordinate_transform.py	2025-10-10 10:38:05.000000000 +0000
@@ -7,7 +7,7 @@ import pytest
 import xarray as xr
 from xarray.core.coordinate_transform import CoordinateTransform
 from xarray.core.indexes import CoordinateTransformIndex
-from xarray.tests import assert_equal
+from xarray.tests import assert_equal, assert_identical
 
 
 class SimpleCoordinateTransform(CoordinateTransform):
@@ -24,12 +24,17 @@ class SimpleCoordinateTransform(Coordina
 
     def forward(self, dim_positions: dict[str, Any]) -> dict[Hashable, Any]:
         assert set(dim_positions) == set(self.dims)
-        return {dim: dim_positions[dim] * self.scale for dim in self.xy_dims}
+        return {
+            name: dim_positions[dim] * self.scale
+            for name, dim in zip(self.coord_names, self.xy_dims, strict=False)
+        }
 
     def reverse(self, coord_labels: dict[Hashable, Any]) -> dict[str, Any]:
         return {dim: coord_labels[dim] / self.scale for dim in self.xy_dims}
 
-    def equals(self, other: "CoordinateTransform") -> bool:
+    def equals(
+        self, other: CoordinateTransform, exclude: frozenset[Hashable] | None = None
+    ) -> bool:
         if not isinstance(other, SimpleCoordinateTransform):
             return False
         return self.scale == other.scale
@@ -118,6 +123,17 @@ def test_coordinate_transform_variable_r
     )
 
 
+def test_coordinate_transform_variable_repr() -> None:
+    var = create_coords(scale=2.0, shape=(2, 2))["x"].variable
+
+    actual = repr(var)
+    expected = """
+<xarray.Variable (y: 2, x: 2)> Size: 32B
+[4 values with dtype=float64]
+    """.strip()
+    assert actual == expected
+
+
 def test_coordinate_transform_variable_basic_outer_indexing() -> None:
     var = create_coords(scale=2.0, shape=(4, 4))["x"].variable
 
@@ -201,18 +217,24 @@ def test_coordinate_transform_sel() -> N
     # doesn't work with coordinate transform index coordinate variables)
     assert actual.equals(expected)
 
-    with pytest.raises(ValueError, match=".*only supports selection.*nearest"):
+    with pytest.raises(ValueError, match=r".*only supports selection.*nearest"):
         ds.sel(x=xr.Variable("z", [0.5, 5.5]), y=xr.Variable("z", [0.0, 0.5]))
 
-    with pytest.raises(ValueError, match="missing labels for coordinate.*y"):
+    with pytest.raises(ValueError, match=r"missing labels for coordinate.*y"):
         ds.sel(x=[0.5, 5.5], method="nearest")
 
-    with pytest.raises(TypeError, match=".*only supports advanced.*indexing"):
+    with pytest.raises(TypeError, match=r".*only supports advanced.*indexing"):
         ds.sel(x=[0.5, 5.5], y=[0.0, 0.5], method="nearest")
 
-    with pytest.raises(ValueError, match=".*only supports advanced.*indexing"):
+    with pytest.raises(ValueError, match=r".*only supports advanced.*indexing"):
         ds.sel(
             x=xr.Variable("z", [0.5, 5.5]),
             y=xr.Variable("z", [0.0, 0.5, 1.5]),
             method="nearest",
         )
+
+
+def test_coordinate_transform_rename() -> None:
+    ds = xr.Dataset(coords=create_coords(scale=2.0, shape=(2, 2)))
+    roundtripped = ds.rename(x="u", y="v").rename(u="x", v="y")
+    assert_identical(ds, roundtripped, check_default_indexes=False)
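The updated `SimpleCoordinateTransform` above suggests that `CoordinateTransform.equals` now accepts an optional `exclude` argument. A minimal sketch of the override a custom transform would need, with everything except the signature elided (the import path mirrors the test above; this is not a complete transform):

    from __future__ import annotations

    from collections.abc import Hashable

    from xarray.core.coordinate_transform import CoordinateTransform

    class MyTransform(CoordinateTransform):
        # __init__, forward(), reverse(), etc. omitted; see
        # SimpleCoordinateTransform in the test module above.
        scale: float

        def equals(
            self, other: CoordinateTransform, exclude: frozenset[Hashable] | None = None
        ) -> bool:
            # `exclude` names coordinates to ignore during comparison; this
            # sketch has none, so only the scale factor is compared.
            if not isinstance(other, MyTransform):
                return False
            return self.scale == other.scale
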
diff -pruN 2025.03.1-8/xarray/tests/test_coordinates.py 2025.10.1-1/xarray/tests/test_coordinates.py
--- 2025.03.1-8/xarray/tests/test_coordinates.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_coordinates.py	2025-10-10 10:38:05.000000000 +0000
@@ -50,7 +50,7 @@ class TestCoordinates:
 
         # coords + indexes not supported
         with pytest.raises(
-            ValueError, match="passing both.*Coordinates.*indexes.*not allowed"
+            ValueError, match=r"passing both.*Coordinates.*indexes.*not allowed"
         ):
             coords = Coordinates(
                 coords=expected.coords, indexes={"x": PandasIndex([0, 1, 2], "x")}
@@ -65,7 +65,7 @@ class TestCoordinates:
         with pytest.raises(ValueError, match="no coordinate variables found"):
             Coordinates(indexes={"x": idx})
 
-        with pytest.raises(TypeError, match=".* is not an `xarray.indexes.Index`"):
+        with pytest.raises(TypeError, match=r".* is not an `xarray.indexes.Index`"):
             Coordinates(
                 coords={"x": ("x", [1, 2, 3])},
                 indexes={"x": "not_an_xarray_index"},  # type: ignore[dict-item]
@@ -93,7 +93,7 @@ class TestCoordinates:
 
         idx = CustomIndexNoCoordsGenerated()
 
-        with pytest.raises(ValueError, match=".*index.*did not create any coordinate"):
+        with pytest.raises(ValueError, match=r".*index.*did not create any coordinate"):
             Coordinates.from_xindex(idx)
 
     def test_from_pandas_multiindex(self) -> None:
@@ -152,13 +152,17 @@ class TestCoordinates:
         coords = Coordinates(coords={"x": [0, 1, 2]})
 
         assert coords.equals(coords)
-        assert not coords.equals("not_a_coords")
+        # Test with a different Coordinates object instead of a string
+        other_coords = Coordinates(coords={"x": [3, 4, 5]})
+        assert not coords.equals(other_coords)
 
     def test_identical(self):
         coords = Coordinates(coords={"x": [0, 1, 2]})
 
         assert coords.identical(coords)
-        assert not coords.identical("not_a_coords")
+        # Test with a different Coordinates object instead of a string
+        other_coords = Coordinates(coords={"x": [3, 4, 5]})
+        assert not coords.identical(other_coords)
 
     def test_assign(self) -> None:
         coords = Coordinates(coords={"x": [0, 1, 2]})
@@ -208,3 +212,87 @@ class TestCoordinates:
         coords = Coordinates(coords={"x": var}, indexes={})
         ds = Dataset(coords=coords)
         assert ds.coords["x"].dims == ("x", "y")
+
+    def test_drop_vars(self):
+        coords = Coordinates(
+            coords={
+                "x": Variable("x", range(3)),
+                "y": Variable("y", list("ab")),
+                "a": Variable(["x", "y"], np.arange(6).reshape(3, 2)),
+            },
+            indexes={},
+        )
+
+        actual = coords.drop_vars("x")
+        assert isinstance(actual, Coordinates)
+        assert set(actual.variables) == {"a", "y"}
+
+        actual = coords.drop_vars(["x", "y"])
+        assert isinstance(actual, Coordinates)
+        assert set(actual.variables) == {"a"}
+
+    def test_drop_dims(self) -> None:
+        coords = Coordinates(
+            coords={
+                "x": Variable("x", range(3)),
+                "y": Variable("y", list("ab")),
+                "a": Variable(["x", "y"], np.arange(6).reshape(3, 2)),
+            },
+            indexes={},
+        )
+
+        actual = coords.drop_dims("x")
+        assert isinstance(actual, Coordinates)
+        assert set(actual.variables) == {"y"}
+
+        actual = coords.drop_dims(["x", "y"])
+        assert isinstance(actual, Coordinates)
+        assert set(actual.variables) == set()
+
+    def test_rename_dims(self) -> None:
+        coords = Coordinates(
+            coords={
+                "x": Variable("x", range(3)),
+                "y": Variable("y", list("ab")),
+                "a": Variable(["x", "y"], np.arange(6).reshape(3, 2)),
+            },
+            indexes={},
+        )
+
+        actual = coords.rename_dims({"x": "X"})
+        assert isinstance(actual, Coordinates)
+        assert set(actual.dims) == {"X", "y"}
+        assert set(actual.variables) == {"a", "x", "y"}
+
+        actual = coords.rename_dims({"x": "u", "y": "v"})
+        assert isinstance(actual, Coordinates)
+        assert set(actual.dims) == {"u", "v"}
+        assert set(actual.variables) == {"a", "x", "y"}
+
+    def test_rename_vars(self) -> None:
+        coords = Coordinates(
+            coords={
+                "x": Variable("x", range(3)),
+                "y": Variable("y", list("ab")),
+                "a": Variable(["x", "y"], np.arange(6).reshape(3, 2)),
+            },
+            indexes={},
+        )
+
+        actual = coords.rename_vars({"x": "X"})
+        assert isinstance(actual, Coordinates)
+        assert set(actual.dims) == {"x", "y"}
+        assert set(actual.variables) == {"a", "X", "y"}
+
+        actual = coords.rename_vars({"x": "u", "y": "v"})
+        assert isinstance(actual, Coordinates)
+        assert set(actual.dims) == {"x", "y"}
+        assert set(actual.variables) == {"a", "u", "v"}
+
+    def test_operator_merge(self) -> None:
+        coords1 = Coordinates({"x": ("x", [0, 1, 2])})
+        coords2 = Coordinates({"y": ("y", [3, 4, 5])})
+        expected = Dataset(coords={"x": [0, 1, 2], "y": [3, 4, 5]})
+
+        actual = coords1 | coords2
+        assert_identical(Dataset(coords=actual), expected)
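The new `Coordinates` tests above cover Dataset-style helpers (`drop_vars`, `drop_dims`, `rename_dims`, `rename_vars`) and the `|` merge operator directly on `Coordinates`. A minimal sketch limited to what those tests demonstrate:

    import xarray as xr

    coords = xr.Coordinates({"x": ("x", [0, 1, 2]), "y": ("y", ["a", "b"])})

    # Dropping a coordinate returns a new Coordinates object without it.
    only_y = coords.drop_vars("x")
    assert set(only_y.variables) == {"y"}

    # The | operator merges two Coordinates objects, as in test_operator_merge.
    merged = coords | xr.Coordinates({"z": ("z", [9])})
    assert set(xr.Dataset(coords=merged).coords) == {"x", "y", "z"}
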
diff -pruN 2025.03.1-8/xarray/tests/test_dask.py 2025.10.1-1/xarray/tests/test_dask.py
--- 2025.03.1-8/xarray/tests/test_dask.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_dask.py	2025-10-10 10:38:05.000000000 +0000
@@ -5,6 +5,7 @@ import pickle
 import sys
 from contextlib import suppress
 from textwrap import dedent
+from typing import Any
 
 import numpy as np
 import pandas as pd
@@ -94,13 +95,14 @@ class TestVariable(DaskTestCase):
         self.assertLazyAndIdentical(self.eager_var, self.lazy_var.copy(deep=True))
 
     def test_chunk(self):
-        for chunks, expected in [
+        test_cases: list[tuple[int | dict[str, Any], tuple[tuple[int, ...], ...]]] = [
             ({}, ((2, 2), (2, 2, 2))),
             (3, ((3, 1), (3, 3))),
             ({"x": 3, "y": 3}, ((3, 1), (3, 3))),
             ({"x": 3}, ((3, 1), (2, 2, 2))),
             ({"x": (3, 1)}, ((3, 1), (2, 2, 2))),
-        ]:
+        ]
+        for chunks, expected in test_cases:
             rechunked = self.lazy_var.chunk(chunks)
             assert rechunked.chunks == expected
             self.assertLazyAndIdentical(self.eager_var, rechunked)
@@ -255,14 +257,10 @@ class TestVariable(DaskTestCase):
 
     def test_missing_methods(self):
         v = self.lazy_var
-        try:
+        with pytest.raises(NotImplementedError, match="dask"):
             v.argsort()
-        except NotImplementedError as err:
-            assert "dask" in str(err)
-        try:
-            v[0].item()
-        except NotImplementedError as err:
-            assert "dask" in str(err)
+        with pytest.raises(NotImplementedError, match="dask"):
+            v[0].item()  # type: ignore[attr-defined]
 
     def test_univariate_ufunc(self):
         u = self.eager_var
@@ -302,7 +300,7 @@ class TestVariable(DaskTestCase):
 
         (v2,) = dask.persist(v)
         assert v is not v2
-        assert len(v2.__dask_graph__()) < len(v.__dask_graph__())
+        assert len(v2.__dask_graph__()) < len(v.__dask_graph__())  # type: ignore[arg-type]
         assert v2.__dask_keys__() == v.__dask_keys__()
         assert dask.is_dask_collection(v)
         assert dask.is_dask_collection(v2)
@@ -349,7 +347,9 @@ class TestDataArrayAndDataset(DaskTestCa
         )
 
     def test_chunk(self) -> None:
-        for chunks, expected in [
+        test_cases: list[
+            tuple[int | str | dict[str, Any], tuple[tuple[int, ...], ...]]
+        ] = [
             ({}, ((2, 2), (2, 2, 2))),
             (3, ((3, 1), (3, 3))),
             ({"x": 3, "y": 3}, ((3, 1), (3, 3))),
@@ -358,7 +358,8 @@ class TestDataArrayAndDataset(DaskTestCa
             ({"x": "16B"}, ((1, 1, 1, 1), (2, 2, 2))),
             ("16B", ((1, 1, 1, 1), (1,) * 6)),
             ("16MB", ((4,), (6,))),
-        ]:
+        ]
+        for chunks, expected in test_cases:
             # Test DataArray
             rechunked = self.lazy_array.chunk(chunks)
             assert rechunked.chunks == expected
@@ -371,7 +372,7 @@ class TestDataArrayAndDataset(DaskTestCa
             lazy_dataset = self.lazy_array.to_dataset()
             eager_dataset = self.eager_array.to_dataset()
             expected_chunksizes = dict(zip(lazy_dataset.dims, expected, strict=True))
-            rechunked = lazy_dataset.chunk(chunks)
+            rechunked = lazy_dataset.chunk(chunks)  # type: ignore[assignment]
 
             # Dataset.chunks has a different return type to DataArray.chunks - see issue #5843
             assert rechunked.chunks == expected_chunksizes
@@ -446,7 +447,11 @@ class TestDataArrayAndDataset(DaskTestCa
 
         assert kernel_call_count == 0
         out = xr.concat(
-            [ds1, ds2, ds3], dim="n", data_vars="different", coords="different"
+            [ds1, ds2, ds3],
+            dim="n",
+            data_vars="different",
+            coords="different",
+            compat="equals",
         )
         # each kernel is computed exactly once
         assert kernel_call_count == 6
@@ -488,7 +493,11 @@ class TestDataArrayAndDataset(DaskTestCa
         # stop computing variables as it would not have any benefit
         ds4 = Dataset(data_vars={"d": ("x", [2.0])}, coords={"c": ("x", [2.0])})
         out = xr.concat(
-            [ds1, ds2, ds4, ds3], dim="n", data_vars="different", coords="different"
+            [ds1, ds2, ds4, ds3],
+            dim="n",
+            data_vars="different",
+            coords="different",
+            compat="equals",
         )
         # the variables of ds1 and ds2 were computed, but those of ds3 didn't
         assert kernel_call_count == 22
@@ -509,7 +518,11 @@ class TestDataArrayAndDataset(DaskTestCa
 
         # now check that concat() is correctly using dask name equality to skip loads
         out = xr.concat(
-            [ds1, ds1, ds1], dim="n", data_vars="different", coords="different"
+            [ds1, ds1, ds1],
+            dim="n",
+            data_vars="different",
+            coords="different",
+            compat="equals",
         )
         assert kernel_call_count == 24
         # variables are not loaded in the output
@@ -593,11 +606,12 @@ class TestDataArrayAndDataset(DaskTestCa
         u = self.eager_array.assign_coords(y=range(6))
         v = self.lazy_array.assign_coords(y=range(6))
 
-        for kwargs in [
+        kwargs_list: list[dict[str, Any]] = [
             {"x": [2, 3, 4]},
             {"x": [1, 100, 2, 101, 3]},
             {"x": [2.5, 3, 3.5], "y": [2, 2.5, 3]},
-        ]:
+        ]
+        for kwargs in kwargs_list:
             expected = u.reindex(**kwargs)
             actual = v.reindex(**kwargs)
             self.assertLazyAndAllClose(expected, actual)
@@ -658,7 +672,9 @@ class TestDataArrayAndDataset(DaskTestCa
         data = da.random.normal(size=(2, 3, 4), chunks=(1, 3, 4))
         arr = DataArray(data, dims=("w", "x", "y"))
         stacked = arr.stack(z=("x", "y"))
-        z = pd.MultiIndex.from_product([np.arange(3), np.arange(4)], names=["x", "y"])
+        z = pd.MultiIndex.from_product(
+            [list(range(3)), list(range(4))], names=["x", "y"]
+        )
         expected = DataArray(data.reshape(2, -1), {"z": z}, dims=["w", "z"])
         assert stacked.data.chunks == expected.data.chunks
         self.assertLazyAndEqual(expected, stacked)
@@ -1042,16 +1058,10 @@ def test_basic_compute():
             ds.foo.variable.compute()
 
 
-def test_dask_layers_and_dependencies():
+def test_dataset_as_delayed():
     ds = Dataset({"foo": ("x", range(5)), "bar": ("x", range(5))}).chunk()
 
-    x = dask.delayed(ds)
-    assert set(x.__dask_graph__().dependencies).issuperset(
-        ds.__dask_graph__().dependencies
-    )
-    assert set(x.foo.__dask_graph__().dependencies).issuperset(
-        ds.__dask_graph__().dependencies
-    )
+    assert dask.delayed(ds).compute() == ds.compute()
 
 
 def make_da():
@@ -1139,7 +1149,8 @@ def test_unify_chunks(map_ds):
 def test_unify_chunks_shallow_copy(obj, transform):
     obj = transform(obj)
     unified = obj.unify_chunks()
-    assert_identical(obj, unified) and obj is not obj.unify_chunks()
+    assert_identical(obj, unified)
+    # assert obj is not unified
 
 
 @pytest.mark.parametrize("obj", [make_da()])
@@ -1164,10 +1175,10 @@ def test_map_blocks_error(map_da, map_ds
         xr.map_blocks(returns_numpy, map_da)
 
     with pytest.raises(TypeError, match=r"args must be"):
-        xr.map_blocks(operator.add, map_da, args=10)
+        xr.map_blocks(operator.add, map_da, args=10)  # type: ignore[arg-type]
 
     with pytest.raises(TypeError, match=r"kwargs must be"):
-        xr.map_blocks(operator.add, map_da, args=[10], kwargs=[20])
+        xr.map_blocks(operator.add, map_da, args=[10], kwargs=[20])  # type: ignore[arg-type]
 
     def really_bad_func(darray):
         raise ValueError("couldn't do anything.")
@@ -1375,14 +1386,14 @@ def test_map_blocks_ds_transformations(f
 def test_map_blocks_da_ds_with_template(obj):
     func = lambda x: x.isel(x=[1])
     # a simple .isel(x=[1, 5, 9]) puts all those in a single chunk.
-    template = xr.concat([obj.isel(x=[i]) for i in [1, 5, 9]], dim="x")
+    template = xr.concat([obj.isel(x=[i]) for i in [1, 5, 9]], data_vars=None, dim="x")
     with raise_if_dask_computes():
         actual = xr.map_blocks(func, obj, template=template)
     assert_identical(actual, template)
 
     # Check that indexes are written into the graph directly
     dsk = dict(actual.__dask_graph__())
-    assert len({k for k in dsk if "x-coordinate" in k})
+    assert {k for k in dsk if "x-coordinate" in k}
     assert all(
         isinstance(v, PandasIndex) for k, v in dsk.items() if "x-coordinate" in k
     )
@@ -1439,7 +1450,7 @@ def test_map_blocks_errors_bad_template(
     with pytest.raises(ValueError, match=r"Received dimension 'x' of length 1"):
         xr.map_blocks(lambda x: x.isel(x=[1]), obj, template=obj).compute()
     with pytest.raises(TypeError, match=r"must be a DataArray"):
-        xr.map_blocks(lambda x: x.isel(x=[1]), obj, template=(obj,)).compute()
+        xr.map_blocks(lambda x: x.isel(x=[1]), obj, template=(obj,)).compute()  # type: ignore[arg-type]
     with pytest.raises(ValueError, match=r"map_blocks requires that one block"):
         xr.map_blocks(
             lambda x: x.isel(x=[1]).assign_coords(x=10), obj, template=obj.isel(x=[1])
@@ -1448,7 +1459,9 @@ def test_map_blocks_errors_bad_template(
         xr.map_blocks(
             lambda a: a.isel(x=[1]).assign_coords(x=[120]),  # assign bad index values
             obj,
-            template=xr.concat([obj.isel(x=[i]) for i in [1, 5, 9]], dim="x"),
+            template=xr.concat(
+                [obj.isel(x=[i]) for i in [1, 5, 9]], data_vars=None, dim="x"
+            ),
         ).compute()
 
 
@@ -1646,7 +1659,7 @@ def test_normalize_token_with_backend(ma
     with create_tmp_file(allow_cleanup_failure=ON_WINDOWS) as tmp_file:
         map_ds.to_netcdf(tmp_file)
         read = xr.open_dataset(tmp_file)
-        assert not dask.base.tokenize(map_ds) == dask.base.tokenize(read)
+        assert dask.base.tokenize(map_ds) != dask.base.tokenize(read)
         read.close()
 
 
@@ -1773,10 +1786,10 @@ def test_graph_manipulation():
     for a, b in ((v, v2), (da, da2), (ds, ds2)):
         assert a.__dask_layers__() != b.__dask_layers__()
         assert len(a.__dask_layers__()) == len(b.__dask_layers__())
-        assert a.__dask_graph__().keys() != b.__dask_graph__().keys()
-        assert len(a.__dask_graph__()) == len(b.__dask_graph__())
-        assert a.__dask_graph__().layers.keys() != b.__dask_graph__().layers.keys()
-        assert len(a.__dask_graph__().layers) == len(b.__dask_graph__().layers)
+        assert a.__dask_graph__().keys() != b.__dask_graph__().keys()  # type: ignore[union-attr]
+        assert len(a.__dask_graph__()) == len(b.__dask_graph__())  # type: ignore[arg-type]
+        assert a.__dask_graph__().layers.keys() != b.__dask_graph__().layers.keys()  # type: ignore[union-attr]
+        assert len(a.__dask_graph__().layers) == len(b.__dask_graph__().layers)  # type: ignore[union-attr]
 
     # Above we performed a slice operation; adding the two slices back together creates
     # a diamond-shaped dependency graph, which in turn will trigger a collision in layer
@@ -1828,3 +1841,15 @@ def test_idxmin_chunking():
     actual = da.idxmin("time")
     assert actual.chunksizes == {k: da.chunksizes[k] for k in ["x", "y"]}
     assert_identical(actual, da.compute().idxmin("time"))
+
+
+def test_conjugate():
+    # Test for https://github.com/pydata/xarray/issues/10302
+    z = 1j * da.arange(100)
+
+    data = xr.DataArray(z, coords={"x": np.arange(100)})
+
+    conj_data = data.conjugate()
+    assert dask.is_dask_collection(conj_data)
+
+    assert_equal(conj_data, data.conj())
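
The new test_conjugate above (for pydata/xarray#10302) pins down that conjugating a dask-backed DataArray stays lazy. A minimal standalone sketch of the same behaviour, assuming only that dask and numpy are installed; the names are illustrative, not part of the patch:

    import dask
    import dask.array as da
    import numpy as np
    import xarray as xr

    # complex-valued, dask-backed DataArray
    data = xr.DataArray(1j * da.arange(100), coords={"x": np.arange(100)})

    conj_data = data.conjugate()
    # conjugation did not trigger an eager compute
    assert dask.is_dask_collection(conj_data)
    # and it matches the .conj() alias once evaluated
    xr.testing.assert_equal(conj_data, data.conj())
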
diff -pruN 2025.03.1-8/xarray/tests/test_dataarray.py 2025.10.1-1/xarray/tests/test_dataarray.py
--- 2025.03.1-8/xarray/tests/test_dataarray.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_dataarray.py	2025-10-10 10:38:05.000000000 +0000
@@ -33,7 +33,7 @@ from xarray import (
 from xarray.coders import CFDatetimeCoder
 from xarray.core import dtypes
 from xarray.core.common import full_like
-from xarray.core.coordinates import Coordinates
+from xarray.core.coordinates import Coordinates, CoordinateValidationError
 from xarray.core.indexes import Index, PandasIndex, filter_indexes_from_coords
 from xarray.core.types import QueryEngineOptions, QueryParserOptions
 from xarray.core.utils import is_scalar
@@ -162,7 +162,7 @@ class TestDataArray:
         with pytest.raises(ValueError, match=r"must be 1-dimensional"):
             self.ds["foo"].to_index()
         with pytest.raises(AttributeError):
-            self.dv.variable = self.v
+            self.dv.variable = self.v  # type: ignore[misc]
 
     def test_data_property(self) -> None:
         array = DataArray(np.zeros((3, 4)))
@@ -418,9 +418,13 @@ class TestDataArray:
         with pytest.raises(TypeError, match=r"is not hashable"):
             DataArray(data, dims=["x", []])  # type: ignore[list-item]
 
-        with pytest.raises(ValueError, match=r"conflicting sizes for dim"):
+        with pytest.raises(
+            CoordinateValidationError, match=r"conflicting sizes for dim"
+        ):
             DataArray([1, 2, 3], coords=[("x", [0, 1])])
-        with pytest.raises(ValueError, match=r"conflicting sizes for dim"):
+        with pytest.raises(
+            CoordinateValidationError, match=r"conflicting sizes for dim"
+        ):
             DataArray([1, 2], coords={"x": [0, 1], "y": ("x", [1])}, dims="x")
 
         with pytest.raises(ValueError, match=r"conflicting MultiIndex"):
@@ -529,6 +533,25 @@ class TestDataArray:
         # test coordinate variables copied
         assert da.coords["x"] is not coords.variables["x"]
 
+    def test_constructor_extra_dim_index_coord(self) -> None:
+        class AnyIndex(Index):
+            def should_add_coord_to_array(self, name, var, dims):
+                return True
+
+        idx = AnyIndex()
+        coords = Coordinates(
+            coords={
+                "x": ("x", [1, 2]),
+                "x_bounds": (("x", "x_bnds"), [(0.5, 1.5), (1.5, 2.5)]),
+            },
+            indexes={"x": idx, "x_bounds": idx},
+        )
+
+        actual = DataArray([1.0, 2.0], coords=coords, dims="x")
+
+        assert_identical(actual.coords, coords, check_default_indexes=False)
+        assert "x_bnds" not in actual.dims
+
     def test_equals_and_identical(self) -> None:
         orig = DataArray(np.arange(5.0), {"a": 42}, dims="x")
 
@@ -722,9 +745,19 @@ class TestDataArray:
         )
         assert_identical(da[[]], DataArray(np.zeros((0, 4)), dims=["x", "y"]))
 
+    def test_getitem_typeerror(self) -> None:
+        with pytest.raises(TypeError, match=r"unexpected indexer type"):
+            self.dv[True]
+        with pytest.raises(TypeError, match=r"unexpected indexer type"):
+            self.dv[np.array(True)]
+        with pytest.raises(TypeError, match=r"invalid indexer array"):
+            self.dv[3.0]
+        with pytest.raises(TypeError, match=r"invalid indexer array"):
+            self.dv[None]
+
     def test_setitem(self) -> None:
         # basic indexing should work as numpy's indexing
-        tuples = [
+        tuples: list[tuple[int | list[int] | slice, int | list[int] | slice]] = [
             (0, 0),
             (0, slice(None, None)),
             (slice(None, None), slice(None, None)),
@@ -1210,7 +1243,7 @@ class TestDataArray:
             self.dv.isel({dim: slice(5) for dim in self.dv.dims}), self.dv.head()
         )
         with pytest.raises(TypeError, match=r"either dict-like or a single int"):
-            self.dv.head([3])
+            self.dv.head([3])  # type: ignore[arg-type]
         with pytest.raises(TypeError, match=r"expected integer type"):
             self.dv.head(x=3.1)
         with pytest.raises(ValueError, match=r"expected positive int"):
@@ -1227,7 +1260,7 @@ class TestDataArray:
             self.dv.isel({dim: slice(-5, None) for dim in self.dv.dims}), self.dv.tail()
         )
         with pytest.raises(TypeError, match=r"either dict-like or a single int"):
-            self.dv.tail([3])
+            self.dv.tail([3])  # type: ignore[arg-type]
         with pytest.raises(TypeError, match=r"expected integer type"):
             self.dv.tail(x=3.1)
         with pytest.raises(ValueError, match=r"expected positive int"):
@@ -1240,7 +1273,7 @@ class TestDataArray:
             self.dv.thin(6),
         )
         with pytest.raises(TypeError, match=r"either dict-like or a single int"):
-            self.dv.thin([3])
+            self.dv.thin([3])  # type: ignore[arg-type]
         with pytest.raises(TypeError, match=r"expected integer type"):
             self.dv.thin(x=3.1)
         with pytest.raises(ValueError, match=r"expected positive int"):
@@ -1412,12 +1445,26 @@ class TestDataArray:
         # GH: 3512
         da = DataArray([0, 1], dims=["x"], coords={"x": [0, 1], "y": "a"})
         db = DataArray([2, 3], dims=["x"], coords={"x": [0, 1], "y": "b"})
-        data = xr.concat([da, db], dim="x").set_index(xy=["x", "y"])
+        data = xr.concat(
+            [da, db], dim="x", coords="different", compat="equals"
+        ).set_index(xy=["x", "y"])
         assert data.dims == ("xy",)
         actual = data.sel(y="a")
         expected = data.isel(xy=[0, 1]).unstack("xy").squeeze("y")
         assert_equal(actual, expected)
 
+    def test_concat_with_default_coords_warns(self) -> None:
+        da = DataArray([0, 1], dims=["x"], coords={"x": [0, 1], "y": "a"})
+        db = DataArray([2, 3], dims=["x"], coords={"x": [0, 1], "y": "b"})
+
+        with pytest.warns(FutureWarning):
+            original = xr.concat([da, db], dim="x")
+            assert original.y.size == 4
+        with set_options(use_new_combine_kwarg_defaults=True):
+            # default compat="override" will pick the first one
+            new = xr.concat([da, db], dim="x")
+            assert new.y.size == 1
+
     def test_virtual_default_coords(self) -> None:
         array = DataArray(np.zeros((5,)), dims="x")
         expected = DataArray(range(5), dims="x", name="x")
@@ -1602,11 +1649,11 @@ class TestDataArray:
 
         # GH: 2112
         da = xr.DataArray([0, 1, 2], dims="x")
-        with pytest.raises(ValueError):
+        with pytest.raises(CoordinateValidationError):
             da["x"] = [0, 1, 2, 3]  # size conflict
-        with pytest.raises(ValueError):
+        with pytest.raises(CoordinateValidationError):
             da.coords["x"] = [0, 1, 2, 3]  # size conflict
-        with pytest.raises(ValueError):
+        with pytest.raises(CoordinateValidationError):
             da.coords["x"] = ("y", [1, 2, 3])  # no new dimension to a DataArray
 
     def test_assign_coords_existing_multiindex(self) -> None:
@@ -1634,6 +1681,27 @@ class TestDataArray:
         assert_identical(actual.coords, coords, check_default_indexes=False)
         assert "y" not in actual.xindexes
 
+    def test_assign_coords_extra_dim_index_coord(self) -> None:
+        class AnyIndex(Index):
+            def should_add_coord_to_array(self, name, var, dims):
+                return True
+
+        idx = AnyIndex()
+        coords = Coordinates(
+            coords={
+                "x": ("x", [1, 2]),
+                "x_bounds": (("x", "x_bnds"), [(0.5, 1.5), (1.5, 2.5)]),
+            },
+            indexes={"x": idx, "x_bounds": idx},
+        )
+
+        da = DataArray([1.0, 2.0], dims="x")
+        actual = da.assign_coords(coords)
+        expected = DataArray([1.0, 2.0], coords=coords, dims="x")
+
+        assert_identical(actual, expected, check_default_indexes=False)
+        assert "x_bnds" not in actual.dims
+
     def test_coords_alignment(self) -> None:
         lhs = DataArray([1, 2, 3], [("x", [0, 1, 2])])
         rhs = DataArray([2, 3, 4], [("x", [1, 2, 3])])
@@ -1887,14 +1955,14 @@ class TestDataArray:
         da = DataArray([0, 0], coords={"x": ("y", [0, 1])}, dims="y")
 
         with pytest.warns(
-            UserWarning, match="rename 'x' to 'y' does not create an index.*"
+            UserWarning, match=r"rename 'x' to 'y' does not create an index.*"
         ):
             da.rename(x="y")
 
         da = xr.DataArray([0, 0], coords={"y": ("x", [0, 1])}, dims="x")
 
         with pytest.warns(
-            UserWarning, match="rename 'x' to 'y' does not create an index.*"
+            UserWarning, match=r"rename 'x' to 'y' does not create an index.*"
         ):
             da.rename(x="y")
 
@@ -2138,7 +2206,7 @@ class TestDataArray:
         assert_identical(other_way_expected, other_way)
 
     def test_set_index(self) -> None:
-        indexes = [self.mindex.get_level_values(n) for n in self.mindex.names]
+        indexes = [self.mindex.get_level_values(n) for n in self.mindex.names]  # type: ignore[arg-type,unused-ignore]  # pandas-stubs varies
         coords = {idx.name: ("x", idx) for idx in indexes}
         array = DataArray(self.mda.values, coords=coords, dims="x")
         expected = self.mda.copy()
@@ -2169,7 +2237,7 @@ class TestDataArray:
             obj.set_index(x="level_4")
 
     def test_reset_index(self) -> None:
-        indexes = [self.mindex.get_level_values(n) for n in self.mindex.names]
+        indexes = [self.mindex.get_level_values(n) for n in self.mindex.names]  # type: ignore[arg-type,unused-ignore]  # pandas-stubs varies
         coords = {idx.name: ("x", idx) for idx in indexes}
         expected = DataArray(self.mda.values, coords=coords, dims="x")
 
@@ -2267,10 +2335,18 @@ class TestDataArray:
         assert_equal(self.dv, np.maximum(self.dv, bar))
 
     def test_astype_attrs(self) -> None:
-        for v in [self.va.copy(), self.mda.copy(), self.ds.copy()]:
+        # Handle DataArray and Dataset in a loop and Variable separately below -
+        # the three classes don't share a common base class, so a combined loop
+        # makes mypy infer type object for v, which doesn't have the attrs or
+        # astype methods
+        for v in [self.mda.copy(), self.ds.copy()]:
             v.attrs["foo"] = "bar"
             assert v.attrs == v.astype(float).attrs
             assert not v.astype(float, keep_attrs=False).attrs
+        # Test Variable separately to avoid mypy inferring object type
+        va = self.va.copy()
+        va.attrs["foo"] = "bar"
+        assert va.attrs == va.astype(float).attrs
+        assert not va.astype(float, keep_attrs=False).attrs
 
     def test_astype_dtype(self) -> None:
         original = DataArray([-1, 1, 2, 3, 1000])
@@ -2511,7 +2587,10 @@ class TestDataArray:
         # test GH3000
         a = orig[:0, :1].stack(new_dim=("x", "y")).indexes["new_dim"]
         b = pd.MultiIndex(
-            levels=[pd.Index([], dtype=np.int64), pd.Index([0], dtype=np.int64)],
+            levels=[
+                pd.Index([], dtype=np.int64),  # type: ignore[list-item,unused-ignore]
+                pd.Index([0], dtype=np.int64),  # type: ignore[list-item,unused-ignore]
+            ],
             codes=[[], []],
             names=["x", "y"],
         )
@@ -2872,7 +2951,7 @@ class TestDataArray:
         expected = DataArray(
             orig.data.mean(keepdims=True),
             dims=orig.dims,
-            coords={k: v for k, v in coords.items() if k in ["c"]},
+            coords={k: v for k, v in coords.items() if k == "c"},
         )
         assert_equal(actual, expected)
 
@@ -2960,6 +3039,9 @@ class TestDataArray:
 
         np.testing.assert_allclose(actual.values, expected)
 
+    @pytest.mark.filterwarnings(
+        "default:The `interpolation` argument to quantile was renamed to `method`:FutureWarning"
+    )
     @pytest.mark.parametrize("method", ["midpoint", "lower"])
     def test_quantile_interpolation_deprecated(self, method) -> None:
         da = DataArray(self.va)
@@ -3067,7 +3149,7 @@ class TestDataArray:
         x2 = np.arange(5, 35)
         a = DataArray(np.random.random((30,)).astype(np.float32), [("x", x1)])
         b = DataArray(np.random.random((30,)).astype(np.float32), [("x", x2)])
-        c, d = align(a, b, join="outer")
+        c, _d = align(a, b, join="outer")
         assert c.dtype == np.float32
 
     def test_align_copy(self) -> None:
@@ -3442,6 +3524,18 @@ class TestDataArray:
         assert_array_equal(index_pd.levels[1], ["a", "b"])
         assert_array_equal(index_pd.levels[2], [5, 6, 7])
 
+        # test converting a dataframe MultiIndexed along a single dimension
+        mindex_single = pd.MultiIndex.from_product(
+            [list(range(6)), list("ab")], names=["A", "B"]
+        )
+
+        arr_multi_single = DataArray(
+            arr_np.flatten(), [("MI", mindex_single)], dims="MI", name="test"
+        )
+        actual_df = arr_multi_single.to_dataframe()
+        expected_df = arr_multi_single.to_series().to_frame()
+        assert expected_df.equals(actual_df)
+
     def test_to_dataframe_0length(self) -> None:
         # regression test for #3008
         arr_np = np.random.randn(4, 0)
@@ -3454,6 +3548,34 @@ class TestDataArray:
         assert len(actual) == 0
         assert_array_equal(actual.index.names, list("ABC"))
 
+    @pytest.mark.parametrize(
+        "x_dtype,y_dtype,v_dtype",
+        [
+            (np.uint32, np.float32, np.uint32),
+            (np.int16, np.float64, np.int64),
+            (np.uint8, np.float32, np.uint16),
+            (np.int32, np.float32, np.int8),
+        ],
+    )
+    def test_to_dataframe_coord_dtypes_2d(self, x_dtype, y_dtype, v_dtype) -> None:
+        x = np.array([1], dtype=x_dtype)
+        y = np.array([1.0], dtype=y_dtype)
+        v = np.array([[42]], dtype=v_dtype)
+
+        da = DataArray(v, dims=["x", "y"], coords={"x": x, "y": y})
+        df = da.to_dataframe(name="v").reset_index()
+
+        # Check that coordinate dtypes are preserved
+        assert df["x"].dtype == np.dtype(x_dtype), (
+            f"x coord: expected {x_dtype}, got {df['x'].dtype}"
+        )
+        assert df["y"].dtype == np.dtype(y_dtype), (
+            f"y coord: expected {y_dtype}, got {df['y'].dtype}"
+        )
+        assert df["v"].dtype == np.dtype(v_dtype), (
+            f"v data: expected {v_dtype}, got {df['v'].dtype}"
+        )
+
     @requires_dask_expr
     @requires_dask
     @pytest.mark.xfail(not has_dask_ge_2025_1_0, reason="dask-expr is broken")
@@ -3545,7 +3667,9 @@ class TestDataArray:
         # regression test for GH4019
         import sparse
 
-        idx = pd.MultiIndex.from_product([np.arange(3), np.arange(5)], names=["a", "b"])
+        idx = pd.MultiIndex.from_product(
+            [list(np.arange(3)), list(np.arange(5))], names=["a", "b"]
+        )
         series: pd.Series = pd.Series(
             np.random.default_rng(0).random(len(idx)), index=idx
         ).sample(n=5, random_state=3)
@@ -3589,7 +3713,7 @@ class TestDataArray:
 
         s = pd.Series(np.arange(5), index=pd.CategoricalIndex(list("aabbc")))
         arr = DataArray(s)
-        assert "'a'" in repr(arr)  # should not error
+        assert "a a b b" in repr(arr)  # should not error
 
     @pytest.mark.parametrize("use_dask", [True, False])
     @pytest.mark.parametrize("data", ["list", "array", True])
@@ -3759,7 +3883,7 @@ class TestDataArray:
         v = range(N)
         da = DataArray(v)
         ma = da.to_masked_array()
-        assert len(ma.mask) == N
+        assert isinstance(ma.mask, np.ndarray) and len(ma.mask) == N
 
     def test_to_dataset_whole(self) -> None:
         unnamed = DataArray([1, 2], dims="x")
@@ -3877,7 +4001,7 @@ class TestDataArray:
         assert "" == array._title_for_slice()
         assert "c = 0" == array.isel(c=0)._title_for_slice()
         title = array.isel(b=1, c=0)._title_for_slice()
-        assert "b = 1, c = 0" == title or "c = 0, b = 1" == title
+        assert title in {"b = 1, c = 0", "c = 0, b = 1"}
 
         a2 = DataArray(np.ones((4, 1)), dims=["a", "b"])
         assert "" == a2._title_for_slice()
@@ -4174,7 +4298,7 @@ class TestDataArray:
         missing_0 = xr.DataArray(coords_r, [(dim, coords_r)])
         with xr.set_options(arithmetic_join=align_type):
             actual = missing_0 + missing_3
-        missing_0_aligned, missing_3_aligned = xr.align(
+        _missing_0_aligned, _missing_3_aligned = xr.align(
             missing_0, missing_3, join=align_type
         )
         expected = xr.DataArray([np.nan, 2, 4, np.nan], [(dim, [0, 1, 2, 3])])
@@ -7040,7 +7164,7 @@ def test_clip(da: DataArray) -> None:
     assert_array_equal(result.isel(time=[0, 1]), with_nans.isel(time=[0, 1]))
 
     # Unclear whether we want this work, OK to adjust the test when we have decided.
-    with pytest.raises(ValueError, match="cannot reindex or align along dimension.*"):
+    with pytest.raises(ValueError, match=r"cannot reindex or align along dimension.*"):
         result = da.clip(min=da.mean("x"), max=da.mean("a").isel(x=[0, 1]))
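
Several hunks above now pass compat="equals" (and coords="different") to xr.concat explicitly, and the new test_concat_with_default_coords_warns covers the transition to the new combine-kwarg defaults. A rough sketch of the behaviour those tests pin down, using only the option name shown in the diff (use_new_combine_kwarg_defaults); it is illustrative rather than part of the patch:

    import xarray as xr

    da = xr.DataArray([0, 1], dims=["x"], coords={"x": [0, 1], "y": "a"})
    db = xr.DataArray([2, 3], dims=["x"], coords={"x": [0, 1], "y": "b"})

    # relying on the old implicit defaults now emits a FutureWarning;
    # the differing scalar coord "y" is concatenated along "x" (size 4)
    old = xr.concat([da, db], dim="x")
    assert old.y.size == 4

    # the new defaults use compat="override", keeping the first object's "y"
    with xr.set_options(use_new_combine_kwarg_defaults=True):
        new = xr.concat([da, db], dim="x")
    assert new.y.size == 1

    # spelling the kwargs out keeps the old behaviour without the warning
    explicit = xr.concat([da, db], dim="x", coords="different", compat="equals")
    assert explicit.y.size == 4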
 
 
diff -pruN 2025.03.1-8/xarray/tests/test_dataarray_typing.yml 2025.10.1-1/xarray/tests/test_dataarray_typing.yml
--- 2025.03.1-8/xarray/tests/test_dataarray_typing.yml	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_dataarray_typing.yml	2025-10-10 10:38:05.000000000 +0000
@@ -107,6 +107,9 @@
     main:7: note:     def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T
 
 - case: test_mypy_pipe_function_unexpected_keyword
+  skip: True # mypy 1.18+ outputs "defined here" notes without line numbers (e.g., "xarray/core/common.py: note:...")
+  # pytest-mypy-plugins expects all lines to match "file:line: severity: message" format and can't parse these notes.
+  # This is a mypy behavior, not a bug. The test would need pytest-mypy-plugins to support notes without line numbers.
   main: |
     from xarray import DataArray
 
@@ -117,6 +120,7 @@
     da = DataArray().pipe(f, 42, kw=99)
   out: |
     main:7: error: Unexpected keyword argument "kw" for "pipe" of "DataWithCoords"  [call-arg]
+    # Note: mypy 1.18.1 also outputs: xarray/core/common: note: "pipe" of "DataWithCoords" defined here
 
 - case: test_mypy_pipe_tuple_return_type_dataarray
   main: |
diff -pruN 2025.03.1-8/xarray/tests/test_dataset.py 2025.10.1-1/xarray/tests/test_dataset.py
--- 2025.03.1-8/xarray/tests/test_dataset.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_dataset.py	2025-10-10 10:38:05.000000000 +0000
@@ -8,11 +8,12 @@ from collections.abc import Hashable
 from copy import copy, deepcopy
 from io import StringIO
 from textwrap import dedent
-from typing import Any, Literal
+from typing import Any, Literal, cast
 
 import numpy as np
 import pandas as pd
 import pytest
+from packaging.version import Version
 from pandas.core.indexes.datetimes import DatetimeIndex
 
 # remove once numpy 2.0 is the oldest supported version
@@ -21,8 +22,13 @@ try:
 except ImportError:
     from numpy import RankWarning  # type: ignore[no-redef,attr-defined,unused-ignore]
 
+import contextlib
+
+from pandas.errors import UndefinedVariableError
+
 import xarray as xr
 from xarray import (
+    AlignmentError,
     DataArray,
     Dataset,
     IndexVariable,
@@ -41,7 +47,7 @@ from xarray.core.coordinates import Coor
 from xarray.core.indexes import Index, PandasIndex
 from xarray.core.types import ArrayLike
 from xarray.core.utils import is_scalar
-from xarray.groupers import TimeResampler
+from xarray.groupers import SeasonResampler, TimeResampler
 from xarray.namedarray.pycompat import array_type, integer_types
 from xarray.testing import _assert_internal_invariants
 from xarray.tests import (
@@ -57,6 +63,7 @@ from xarray.tests import (
     create_test_data,
     has_cftime,
     has_dask,
+    has_pyarrow,
     raise_if_dask_computes,
     requires_bottleneck,
     requires_cftime,
@@ -68,18 +75,10 @@ from xarray.tests import (
     requires_sparse,
     source_ndarray,
 )
+from xarray.tests.indexes import ScalarIndex, XYIndex
 
-try:
-    from pandas.errors import UndefinedVariableError
-except ImportError:
-    # TODO: remove once we stop supporting pandas<1.4.3
-    from pandas.core.computation.ops import UndefinedVariableError
-
-
-try:
+with contextlib.suppress(ImportError):
     import dask.array as da
-except ImportError:
-    pass
 
 # from numpy version 2.0 trapz is deprecated and renamed to trapezoid
 # remove once numpy 2.0 is the oldest supported version
@@ -279,28 +278,31 @@ class AccessibleAsDuckArrayDataStore(bac
 
 class TestDataset:
     def test_repr(self) -> None:
-        data = create_test_data(seed=123)
+        data = create_test_data(seed=123, use_extension_array=True)
         data.attrs["foo"] = "bar"
         # need to insert str dtype at runtime to handle different endianness
+        var5 = (
+            "\n                var5     (dim1) int64[pyarrow] 64B 5 9 7 2 6 2 8 1"
+            if has_pyarrow
+            else ""
+        )
         expected = dedent(
-            """\
+            f"""\
             <xarray.Dataset> Size: 2kB
             Dimensions:  (dim2: 9, dim3: 10, time: 20, dim1: 8)
             Coordinates:
               * dim2     (dim2) float64 72B 0.0 0.5 1.0 1.5 2.0 2.5 3.0 3.5 4.0
-              * dim3     (dim3) {} 40B 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j'
-              * time     (time) datetime64[{}] 160B 2000-01-01 2000-01-02 ... 2000-01-20
+              * dim3     (dim3) {data["dim3"].dtype} 40B 'a' 'b' 'c' 'd' 'e' 'f' 'g' 'h' 'i' 'j'
+              * time     (time) datetime64[ns] 160B 2000-01-01 2000-01-02 ... 2000-01-20
                 numbers  (dim3) int64 80B 0 1 2 0 0 1 1 2 2 3
             Dimensions without coordinates: dim1
             Data variables:
                 var1     (dim1, dim2) float64 576B -0.9891 -0.3678 1.288 ... -0.2116 0.364
                 var2     (dim1, dim2) float64 576B 0.953 1.52 1.704 ... 0.1347 -0.6423
                 var3     (dim3, dim1) float64 640B 0.4107 0.9941 0.1665 ... 0.716 1.555
+                var4     (dim1) category 3{6 if Version(pd.__version__) >= Version("3.0.0dev0") else 2}B b c b a c a c a{var5}
             Attributes:
-                foo:      bar""".format(
-                data["dim3"].dtype,
-                "ns",
-            )
+                foo:      bar"""
         )
         actual = "\n".join(x.rstrip() for x in repr(data).split("\n"))
 
@@ -633,7 +635,7 @@ class TestDataset:
         with pytest.raises(ValueError, match=r"conflicting MultiIndex"):
             with pytest.warns(
                 FutureWarning,
-                match=".*`pandas.MultiIndex`.*no longer be implicitly promoted.*",
+                match=r".*`pandas.MultiIndex`.*no longer be implicitly promoted.*",
             ):
                 Dataset({}, {"x": mindex, "y": mindex})
                 Dataset({}, {"x": mindex, "level_1": range(4)})
@@ -653,13 +655,13 @@ class TestDataset:
 
         with pytest.warns(
             FutureWarning,
-            match=".*`pandas.MultiIndex`.*no longer be implicitly promoted.*",
+            match=r".*`pandas.MultiIndex`.*no longer be implicitly promoted.*",
         ):
             Dataset(data_vars={"x": midx})
 
         with pytest.warns(
             FutureWarning,
-            match=".*`pandas.MultiIndex`.*no longer be implicitly promoted.*",
+            match=r".*`pandas.MultiIndex`.*no longer be implicitly promoted.*",
         ):
             Dataset(coords={"x": midx})
 
@@ -688,7 +690,7 @@ class TestDataset:
         assert type(ds.dims.mapping) is dict
         with pytest.warns(
             FutureWarning,
-            match=" To access a mapping from dimension names to lengths, please use `Dataset.sizes`",
+            match=r" To access a mapping from dimension names to lengths, please use `Dataset.sizes`",
         ):
             assert ds.dims == ds.sizes
         assert ds.sizes == {"dim1": 8, "dim2": 9, "dim3": 10, "time": 20}
@@ -1082,7 +1084,7 @@ class TestDataset:
 
         # https://github.com/pydata/xarray/issues/7588
         with pytest.raises(
-            AssertionError, match="something is wrong with Dataset._coord_names"
+            AssertionError, match=r"something is wrong with Dataset._coord_names"
         ):
             ds._coord_names = {"w", "x", "y", "z"}
             len(ds.data_vars)
@@ -1135,6 +1137,111 @@ class TestDataset:
         assert ds.chunks == {}
 
     @requires_dask
+    @pytest.mark.parametrize(
+        "use_cftime,calendar",
+        [
+            (False, "standard"),
+            (pytest.param(True, marks=pytest.mark.skipif(not has_cftime)), "standard"),
+            (pytest.param(True, marks=pytest.mark.skipif(not has_cftime)), "noleap"),
+            (pytest.param(True, marks=pytest.mark.skipif(not has_cftime)), "360_day"),
+        ],
+    )
+    def test_chunk_by_season_resampler(self, use_cftime: bool, calendar: str) -> None:
+        import dask.array
+
+        N = 365 + 365  # 2 years - 1 day
+        time = xr.date_range(
+            "2000-01-01", periods=N, freq="D", use_cftime=use_cftime, calendar=calendar
+        )
+
+        ds = Dataset(
+            {
+                "pr": ("time", dask.array.random.random((N), chunks=(20))),
+                "pr2d": (("x", "time"), dask.array.random.random((10, N), chunks=(20))),
+                "ones": ("time", np.ones((N,))),
+            },
+            coords={"time": time},
+        )
+
+        # Standard seasons
+        rechunked = ds.chunk(
+            {"x": 2, "time": SeasonResampler(["DJF", "MAM", "JJA", "SON"])}
+        )
+        assert rechunked.chunksizes["x"] == (2,) * 5
+        assert len(rechunked.chunksizes["time"]) == 9
+        assert rechunked.chunksizes["x"] == (2,) * 5
+        assert sum(rechunked.chunksizes["time"]) == ds.sizes["time"]
+
+        if calendar == "standard":
+            assert rechunked.chunksizes["time"] == (60, 92, 92, 91, 90, 92, 92, 91, 30)
+        elif calendar == "noleap":
+            assert rechunked.chunksizes["time"] == (59, 92, 92, 91, 90, 92, 92, 91, 31)
+        elif calendar == "360_day":
+            assert rechunked.chunksizes["time"] == (60, 90, 90, 90, 90, 90, 90, 90, 40)
+        else:
+            raise AssertionError("unreachable")
+
+        # Custom seasons
+        rechunked = ds.chunk(
+            {"x": 2, "time": SeasonResampler(["DJFM", "AM", "JJA", "SON"])}
+        )
+        assert len(rechunked.chunksizes["time"]) == 9
+        assert sum(rechunked.chunksizes["time"]) == ds.sizes["time"]
+        assert rechunked.chunksizes["x"] == (2,) * 5
+
+        if calendar == "standard":
+            assert rechunked.chunksizes["time"] == (91, 61, 92, 91, 121, 61, 92, 91, 30)
+        elif calendar == "noleap":
+            assert rechunked.chunksizes["time"] == (90, 61, 92, 91, 121, 61, 92, 91, 31)
+        elif calendar == "360_day":
+            assert rechunked.chunksizes["time"] == (90, 60, 90, 90, 120, 60, 90, 90, 40)
+        else:
+            raise AssertionError("unreachable")
+
+        # Test that drop_incomplete doesn't affect chunking
+        rechunked_drop_true = ds.chunk(
+            time=SeasonResampler(["DJF", "MAM", "JJA", "SON"], drop_incomplete=True)
+        )
+        rechunked_drop_false = ds.chunk(
+            time=SeasonResampler(["DJF", "MAM", "JJA", "SON"], drop_incomplete=False)
+        )
+        assert (
+            rechunked_drop_true.chunksizes["time"]
+            == rechunked_drop_false.chunksizes["time"]
+        )
+
+    @requires_dask
+    def test_chunk_by_season_resampler_errors(self):
+        """Test error handling for SeasonResampler chunking."""
+        # Test error on missing season (should fail with incomplete seasons)
+        ds = Dataset(
+            {"x": ("time", np.arange(12))},
+            coords={"time": pd.date_range("2000-01-01", periods=12, freq="MS")},
+        )
+        with pytest.raises(ValueError, match="does not cover all 12 months"):
+            ds.chunk(time=SeasonResampler(["DJF", "MAM", "SON"]))
+
+        ds = Dataset({"foo": ("x", [1, 2, 3])})
+        # Test error on virtual variable
+        with pytest.raises(ValueError, match="virtual variable"):
+            ds.chunk(x=SeasonResampler(["DJF", "MAM", "JJA", "SON"]))
+
+        # Test error on non-datetime variable
+        ds["x"] = ("x", [1, 2, 3])
+        with pytest.raises(ValueError, match="datetime variables"):
+            ds.chunk(x=SeasonResampler(["DJF", "MAM", "JJA", "SON"]))
+
+        # Test successful case with 1D datetime variable
+        ds["x"] = ("x", xr.date_range("2001-01-01", periods=3, freq="D"))
+        # This should work
+        result = ds.chunk(x=SeasonResampler(["DJF", "MAM", "JJA", "SON"]))
+        assert result.chunks is not None
+
+        # Test error on missing season (should fail with incomplete seasons)
+        with pytest.raises(ValueError):
+            ds.chunk(x=SeasonResampler(["DJF", "MAM", "SON"]))
+
+    @requires_dask
     def test_chunk(self) -> None:
         data = create_test_data()
         for v in data.variables.values():
@@ -1213,7 +1320,7 @@ class TestDataset:
         import dask.array
 
         N = 365 * 2
-        ΔN = 28
+        ΔN = 28  # noqa: PLC2401
         time = xr.date_range(
             "2001-01-01", periods=N + ΔN, freq="D", calendar=calendar
         ).to_numpy(copy=True)
@@ -1597,32 +1704,8 @@ class TestDataset:
         # regression test https://github.com/pydata/xarray/issues/10063
         # isel on a multi-coordinate index should return a unique index associated
         # to each coordinate
-        class MultiCoordIndex(xr.Index):
-            def __init__(self, idx1, idx2):
-                self.idx1 = idx1
-                self.idx2 = idx2
-
-            @classmethod
-            def from_variables(cls, variables, *, options=None):
-                idx1 = PandasIndex.from_variables(
-                    {"x": variables["x"]}, options=options
-                )
-                idx2 = PandasIndex.from_variables(
-                    {"y": variables["y"]}, options=options
-                )
-
-                return cls(idx1, idx2)
-
-            def create_variables(self, variables=None):
-                return {**self.idx1.create_variables(), **self.idx2.create_variables()}
-
-            def isel(self, indexers):
-                idx1 = self.idx1.isel({"x": indexers.get("x", slice(None))})
-                idx2 = self.idx2.isel({"y": indexers.get("y", slice(None))})
-                return MultiCoordIndex(idx1, idx2)
-
         coords = xr.Coordinates(coords={"x": [0, 1], "y": [1, 2]}, indexes={})
-        ds = xr.Dataset(coords=coords).set_xindex(["x", "y"], MultiCoordIndex)
+        ds = xr.Dataset(coords=coords).set_xindex(["x", "y"], XYIndex)
 
         ds2 = ds.isel(x=slice(None), y=slice(None))
         assert ds2.xindexes["x"] is ds2.xindexes["y"]
@@ -1813,7 +1896,7 @@ class TestDataset:
         actual3 = ds.unstack("index")
         assert actual3["var"].shape == (2, 2)
 
-    def test_categorical_reindex(self) -> None:
+    def test_categorical_index_reindex(self) -> None:
         cat = pd.CategoricalIndex(
             ["foo", "bar", "baz"],
             categories=["foo", "bar", "baz", "qux", "quux", "corge"],
@@ -1825,6 +1908,32 @@ class TestDataset:
         actual = ds.reindex(cat=["foo"])["cat"].values
         assert (actual == np.array(["foo"])).all()
 
+    @pytest.mark.parametrize("fill_value", [np.nan, pd.NA])
+    def test_extensionarray_negative_reindex(self, fill_value) -> None:
+        cat = pd.Categorical(
+            ["foo", "bar", "baz"],
+            categories=["foo", "bar", "baz", "qux", "quux", "corge"],
+        )
+        ds = xr.Dataset(
+            {"cat": ("index", cat)},
+            coords={"index": ("index", np.arange(3))},
+        )
+        reindexed_cat = cast(
+            pd.api.extensions.ExtensionArray,
+            (
+                ds.reindex(index=[-1, 1, 1], fill_value=fill_value)["cat"]
+                .to_pandas()
+                .values
+            ),
+        )
+        assert reindexed_cat.equals(pd.array([pd.NA, "bar", "bar"], dtype=cat.dtype))  # type: ignore[attr-defined]
+
+    def test_extension_array_reindex_same(self) -> None:
+        series = pd.Series([1, 2, pd.NA, 3], dtype=pd.Int32Dtype())
+        test = xr.Dataset({"test": series})
+        res = test.reindex(dim_0=series.index)
+        align(res, test, join="exact")
+
     def test_categorical_multiindex(self) -> None:
         i1 = pd.Series([0, 0])
         cat = pd.CategoricalDtype(categories=["foo", "baz", "bar"])
@@ -2366,6 +2475,19 @@ class TestDataset:
         assert_identical(expected, actual)
         assert actual.x.dtype == expected.x.dtype
 
+    def test_reindex_with_multiindex_level(self) -> None:
+        # test for https://github.com/pydata/xarray/issues/10347
+        mindex = pd.MultiIndex.from_product(
+            [[100, 200, 300], [1, 2, 3, 4]], names=["x", "y"]
+        )
+        y_idx = PandasIndex(mindex.levels[1], "y")
+
+        ds1 = xr.Dataset(coords={"y": [1, 2, 3]})
+        ds2 = xr.Dataset(coords=xr.Coordinates.from_xindex(y_idx))
+
+        actual = ds1.reindex(y=ds2.y)
+        assert_identical(actual, ds2)
+
     @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"foo": 2, "bar": 1}])
     def test_align_fill_value(self, fill_value) -> None:
         x = Dataset({"foo": DataArray([1, 2], dims=["x"], coords={"x": [1, 2]})})
@@ -2543,6 +2665,28 @@ class TestDataset:
 
         assert_identical(expected_x2, x2)
 
+    def test_align_multiple_indexes_common_dim(self) -> None:
+        a = Dataset(coords={"x": [1, 2], "xb": ("x", [3, 4])}).set_xindex("xb")
+        b = Dataset(coords={"x": [1], "xb": ("x", [3])}).set_xindex("xb")
+
+        (a2, b2) = align(a, b, join="inner")
+        assert_identical(a2, b, check_default_indexes=False)
+        assert_identical(b2, b, check_default_indexes=False)
+
+        c = Dataset(coords={"x": [1, 3], "xb": ("x", [2, 4])}).set_xindex("xb")
+
+        with pytest.raises(AlignmentError, match=r".*conflicting re-indexers"):
+            align(a, c)
+
+    def test_align_conflicting_indexes(self) -> None:
+        class CustomIndex(PandasIndex): ...
+
+        a = Dataset(coords={"xb": ("x", [3, 4])}).set_xindex("xb")
+        b = Dataset(coords={"xb": ("x", [3])}).set_xindex("xb", CustomIndex)
+
+        with pytest.raises(AlignmentError, match=r"cannot align.*conflicting indexes"):
+            align(a, b)
+
     def test_align_non_unique(self) -> None:
         x = Dataset({"foo": ("x", [3, 4, 5]), "x": [0, 0, 1]})
         x1, x2 = align(x, x)
@@ -2586,6 +2730,61 @@ class TestDataset:
         assert ds.x.attrs == {"units": "m"}
         assert ds_noattr.x.attrs == {}
 
+    def test_align_scalar_index(self) -> None:
+        # ensure that indexes associated with scalar coordinates are not ignored
+        # during alignment
+        ds1 = Dataset(coords={"x": 0}).set_xindex("x", ScalarIndex)
+        ds2 = Dataset(coords={"x": 0}).set_xindex("x", ScalarIndex)
+
+        actual = xr.align(ds1, ds2, join="exact")
+        assert_identical(actual[0], ds1, check_default_indexes=False)
+        assert_identical(actual[1], ds2, check_default_indexes=False)
+
+        ds3 = Dataset(coords={"x": 1}).set_xindex("x", ScalarIndex)
+
+        with pytest.raises(AlignmentError, match="cannot align objects"):
+            xr.align(ds1, ds3, join="exact")
+
+    def test_align_multi_dim_index_exclude_dims(self) -> None:
+        ds1 = (
+            Dataset(coords={"x": [1, 2], "y": [3, 4]})
+            .drop_indexes(["x", "y"])
+            .set_xindex(["x", "y"], XYIndex)
+        )
+        ds2 = (
+            Dataset(coords={"x": [1, 2], "y": [5, 6]})
+            .drop_indexes(["x", "y"])
+            .set_xindex(["x", "y"], XYIndex)
+        )
+
+        for join in ("outer", "exact"):
+            actual = xr.align(ds1, ds2, join=join, exclude="y")
+            assert_identical(actual[0], ds1, check_default_indexes=False)
+            assert_identical(actual[1], ds2, check_default_indexes=False)
+
+        with pytest.raises(
+            AlignmentError, match=r"cannot align objects.*index.*not equal"
+        ):
+            xr.align(ds1, ds2, join="exact")
+
+        with pytest.raises(AlignmentError, match="cannot exclude dimension"):
+            xr.align(ds1, ds2, join="override", exclude="y")
+
+    def test_align_index_equals_future_warning(self) -> None:
+        # TODO: remove this test once the deprecation cycle is completed
+        class DeprecatedEqualsSignatureIndex(PandasIndex):
+            def equals(self, other: Index) -> bool:  # type: ignore[override]
+                return super().equals(other, exclude=None)
+
+        ds = (
+            Dataset(coords={"x": [1, 2]})
+            .drop_indexes("x")
+            .set_xindex("x", DeprecatedEqualsSignatureIndex)
+        )
+
+        with pytest.warns(FutureWarning, match=r"signature.*deprecated"):
+            xr.align(ds, ds.copy(), join="exact")
+
     def test_broadcast(self) -> None:
         ds = Dataset(
             {"foo": 0, "bar": ("x", [1]), "baz": ("y", [2, 3])}, {"c": ("x", [4])}
@@ -2624,7 +2823,7 @@ class TestDataset:
         assert_identical(x, actual_x)
         assert source_ndarray(actual_x["foo"].data) is source_ndarray(x["foo"].data)
 
-        actual_x, actual_y = broadcast(x, y)
+        actual_x, _actual_y = broadcast(x, y)
         assert_identical(x, actual_x)
         assert source_ndarray(actual_x["foo"].data) is source_ndarray(x["foo"].data)
 
@@ -2927,7 +3126,7 @@ class TestDataset:
         midx_coords = Coordinates.from_pandas_multiindex(midx, "x")
         ds = Dataset(coords=midx_coords)
 
-        with pytest.raises(ValueError, match=".*would corrupt the following index.*"):
+        with pytest.raises(ValueError, match=r".*would corrupt the following index.*"):
             ds.drop_indexes("a")
 
     def test_drop_dims(self) -> None:
@@ -3198,14 +3397,14 @@ class TestDataset:
         ds = Dataset(coords={"x": ("y", [0, 1])})
 
         with pytest.warns(
-            UserWarning, match="rename 'x' to 'y' does not create an index.*"
+            UserWarning, match=r"rename 'x' to 'y' does not create an index.*"
         ):
             ds.rename(x="y")
 
         ds = Dataset(coords={"y": ("x", [0, 1])})
 
         with pytest.warns(
-            UserWarning, match="rename 'x' to 'y' does not create an index.*"
+            UserWarning, match=r"rename 'x' to 'y' does not create an index.*"
         ):
             ds.rename(x="y")
 
@@ -3591,7 +3790,7 @@ class TestDataset:
     def test_set_index(self) -> None:
         expected = create_test_multiindex()
         mindex = expected["x"].to_index()
-        indexes = [mindex.get_level_values(n) for n in mindex.names]
+        indexes = [mindex.get_level_values(str(n)) for n in mindex.names]
         coords = {idx.name: ("x", idx) for idx in indexes}
         ds = Dataset({}, coords=coords)
 
@@ -3652,7 +3851,7 @@ class TestDataset:
     def test_reset_index(self) -> None:
         ds = create_test_multiindex()
         mindex = ds["x"].to_index()
-        indexes = [mindex.get_level_values(n) for n in mindex.names]
+        indexes = [mindex.get_level_values(str(n)) for n in mindex.names]
         coords = {idx.name: ("x", idx) for idx in indexes}
         expected = Dataset({}, coords=coords)
 
@@ -3747,7 +3946,7 @@ class TestDataset:
 
         class NotAnIndex: ...
 
-        with pytest.raises(TypeError, match=".*not a subclass of xarray.Index"):
+        with pytest.raises(TypeError, match=r".*not a subclass of xarray.Index"):
             ds.set_xindex("foo", NotAnIndex)  # type: ignore[arg-type]
 
         with pytest.raises(ValueError, match="those variables don't exist"):
@@ -4098,6 +4297,33 @@ class TestDataset:
             expected_stacked_variable,
         )
 
+    def test_to_stacked_array_transposed(self) -> None:
+        # test that to_stacked_array uses updated dim order after transposition
+        ds = xr.Dataset(
+            data_vars=dict(
+                v1=(["d1", "d2"], np.arange(6).reshape((2, 3))),
+            ),
+            coords=dict(
+                d1=(["d1"], np.arange(2)),
+                d2=(["d2"], np.arange(3)),
+            ),
+        )
+        da = ds.to_stacked_array(
+            new_dim="new_dim",
+            sample_dims=[],
+            variable_dim="variable",
+        )
+        dsT = ds.transpose()
+        daT = dsT.to_stacked_array(
+            new_dim="new_dim",
+            sample_dims=[],
+            variable_dim="variable",
+        )
+        v1 = np.arange(6)
+        v1T = np.arange(6).reshape((2, 3)).T.flatten()
+        np.testing.assert_equal(da.to_numpy(), v1)
+        np.testing.assert_equal(daT.to_numpy(), v1T)
+
     def test_update(self) -> None:
         data = create_test_data(seed=0)
         expected = data.copy()
@@ -4206,6 +4432,26 @@ class TestDataset:
         dataset = Dataset({key: ("dim0", range(1)) for key in keys})
         assert_identical(dataset, dataset[keys])
 
+    def test_getitem_extra_dim_index_coord(self) -> None:
+        class AnyIndex(Index):
+            def should_add_coord_to_array(self, name, var, dims):
+                return True
+
+        idx = AnyIndex()
+        coords = Coordinates(
+            coords={
+                "x": ("x", [1, 2]),
+                "x_bounds": (("x", "x_bnds"), [(0.5, 1.5), (1.5, 2.5)]),
+            },
+            indexes={"x": idx, "x_bounds": idx},
+        )
+
+        ds = Dataset({"foo": (("x"), [1.0, 2.0])}, coords=coords)
+        actual = ds["foo"]
+
+        assert_identical(actual.coords, coords, check_default_indexes=False)
+        assert "x_bnds" not in actual.dims
+
     def test_virtual_variables_default_coords(self) -> None:
         dataset = Dataset({"foo": ("x", range(10))})
         expected1 = DataArray(range(10), dims="x", name="x")
@@ -4354,7 +4600,6 @@ class TestDataset:
         ds["x"] = np.arange(3)
         ds_copy = ds.copy()
         ds_copy["bar"] = ds["bar"].to_pandas()
-
         assert_equal(ds, ds_copy)
 
     def test_setitem_auto_align(self) -> None:
@@ -4619,7 +4864,7 @@ class TestDataset:
 
         with pytest.warns(
             FutureWarning,
-            match=".*`pandas.MultiIndex`.*no longer be implicitly promoted.*",
+            match=r".*`pandas.MultiIndex`.*no longer be implicitly promoted.*",
         ):
             actual = ds.assign(x=midx)
         assert_identical(actual, expected)
@@ -4636,7 +4881,7 @@ class TestDataset:
 
         with pytest.warns(
             FutureWarning,
-            match=".*`pandas.MultiIndex`.*no longer be implicitly promoted.*",
+            match=r".*`pandas.MultiIndex`.*no longer be implicitly promoted.*",
         ):
             actual = ds.assign_coords({"x": midx})
         assert_identical(actual, expected)
@@ -4875,7 +5120,6 @@ class TestDataset:
         # from_dataframe attempts to broadcast across because it doesn't know better, so cat must be converted
         ds["cat"] = (("x", "y"), np.stack((ds["cat"].to_numpy(), ds["cat"].to_numpy())))
         assert_identical(ds.assign_coords(x=[0, 1]), Dataset.from_dataframe(actual))
-
         # Check multiindex reordering
         new_order = ["x", "y"]
         # revert broadcasting fix above for 1d arrays
@@ -4909,6 +5153,41 @@ class TestDataset:
         ):
             ds.to_dataframe(dim_order=invalid_order)
 
+        # test a case with a MultiIndex along a single dimension
+        data_dict = dict(
+            x=[1, 2, 1, 2, 1], y=["a", "a", "b", "b", "b"], z=[5, 10, 15, 20, 25]
+        )
+        data_dict_w_dims = {k: ("single_dim", v) for k, v in data_dict.items()}
+
+        # Dataset multi-indexed along "single_dim" by "x" and "y"
+        ds = Dataset(data_dict_w_dims).set_coords(["x", "y"]).set_xindex(["x", "y"])
+        expected = pd.DataFrame(data_dict).set_index(["x", "y"])
+        actual = ds.to_dataframe()
+        assert expected.equals(actual)
+        # should be possible to reset index, as there should be no duplication
+        # between index and columns, and dataframes should still be equal
+        assert expected.reset_index().equals(actual.reset_index())
+
+        # MultiIndex deduplication should not affect other coordinates.
+        mindex_single = pd.MultiIndex.from_product(
+            [list(range(6)), list("ab")], names=["A", "B"]
+        )
+        ds = DataArray(
+            range(12), [("MI", mindex_single)], dims="MI", name="test"
+        )._to_dataset_whole()
+        ds.coords["C"] = "a single value"
+        ds.coords["D"] = ds.coords["A"] ** 2
+        expected = pd.DataFrame(
+            dict(
+                test=range(12),
+                C="a single value",
+                D=[0, 0, 1, 1, 4, 4, 9, 9, 16, 16, 25, 25],
+            )
+        ).set_index(mindex_single)
+        actual = ds.to_dataframe()
+        assert expected.equals(actual)
+        assert expected.reset_index().equals(actual.reset_index())
+
         # check pathological cases
         df = pd.DataFrame([1])
         actual_ds = Dataset.from_dataframe(df)
@@ -4945,6 +5224,16 @@ class TestDataset:
         expected = pd.DataFrame([[]], index=idx)
         assert expected.equals(actual), (expected, actual)
 
+    def test_from_dataframe_categorical_dtype_index(self) -> None:
+        cat = pd.CategoricalIndex(list("abcd"))
+        df = pd.DataFrame({"f": [0, 1, 2, 3]}, index=cat)
+        ds = df.to_xarray()
+        restored = ds.to_dataframe()
+        df.index.name = (
+            "index"  # restored gets the name because it has the coord with the name
+        )
+        pd.testing.assert_frame_equal(df, restored)
+
     def test_from_dataframe_categorical_index(self) -> None:
         cat = pd.CategoricalDtype(
             categories=["foo", "bar", "baz", "qux", "quux", "corge"]
@@ -4969,7 +5258,7 @@ class TestDataset:
         )
         ser = pd.Series(1, index=cat)
         ds = ser.to_xarray()
-        assert ds.coords.dtypes["index"] == np.dtype("O")
+        assert ds.coords.dtypes["index"] == ser.index.dtype
 
     @requires_sparse
     def test_from_dataframe_sparse(self) -> None:
@@ -5048,7 +5337,7 @@ class TestDataset:
     def test_from_dataframe_non_unique_columns(self) -> None:
         # regression test for GH449
         df = pd.DataFrame(np.zeros((2, 2)))
-        df.columns = ["foo", "foo"]  # type: ignore[assignment]
+        df.columns = ["foo", "foo"]  # type: ignore[assignment,list-item,unused-ignore]
         with pytest.raises(ValueError, match=r"non-unique columns"):
             Dataset.from_dataframe(df)
 
@@ -5541,7 +5830,7 @@ class TestDataset:
             coords={
                 "x": [4, 3],
                 "y": [1, 2],
-                "z": (["x", "y"], [[np.e, np.pi], [np.pi * np.e, np.pi * 3]]),
+                "z": (["x", "y"], [[np.exp(1), np.pi], [np.pi * np.exp(1), np.pi * 3]]),
             },
         )
         expected7 = Dataset(
@@ -5690,20 +5979,21 @@ class TestDataset:
     def test_reduce_non_numeric(self) -> None:
         data1 = create_test_data(seed=44, use_extension_array=True)
         data2 = create_test_data(seed=44)
-        add_vars = {"var5": ["dim1", "dim2"], "var6": ["dim1"]}
+        add_vars = {"var6": ["dim1", "dim2"], "var7": ["dim1"]}
         for v, dims in sorted(add_vars.items()):
             size = tuple(data1.sizes[d] for d in dims)
             data = np.random.randint(0, 100, size=size).astype(np.str_)
             data1[v] = (dims, data, {"foo": "variable"})
-        # var4 is extension array categorical and should be dropped
+        # var4 and var5 are extension arrays and should be dropped
         assert (
             "var4" not in data1.mean()
             and "var5" not in data1.mean()
             and "var6" not in data1.mean()
+            and "var7" not in data1.mean()
         )
         assert_equal(data1.mean(), data2.mean())
         assert_equal(data1.mean(dim="dim1"), data2.mean(dim="dim1"))
-        assert "var5" not in data1.mean(dim="dim2") and "var6" in data1.mean(dim="dim2")
+        assert "var6" not in data1.mean(dim="dim2") and "var7" in data1.mean(dim="dim2")
 
     @pytest.mark.filterwarnings(
         "ignore:Once the behaviour of DataArray:DeprecationWarning"
@@ -5905,17 +6195,20 @@ class TestDataset:
         assert_identical(result.var2, ds.var2.quantile(q, method=method))
         assert_identical(result.var3, ds.var3.quantile(q, method=method))
 
+    @pytest.mark.filterwarnings(
+        "default:The `interpolation` argument to quantile was renamed to `method`:FutureWarning"
+    )
     @pytest.mark.parametrize("method", ["midpoint", "lower"])
     def test_quantile_interpolation_deprecated(self, method) -> None:
         ds = create_test_data(seed=123)
         q = [0.25, 0.5, 0.75]
 
-        with warnings.catch_warnings(record=True) as w:
+        with pytest.warns(
+            FutureWarning,
+            match="`interpolation` argument to quantile was renamed to `method`",
+        ):
             ds.quantile(q, interpolation=method)
 
-            # ensure the warning is only raised once
-            assert len(w) == 1
-
         with warnings.catch_warnings(record=True):
             with pytest.raises(TypeError, match="interpolation and method keywords"):
                 ds.quantile(q, method=method, interpolation=method)
@@ -5977,6 +6270,38 @@ class TestDataset:
         expected = data.drop_vars("time")  # time is not used on a data var
         assert_equal(expected, actual)
 
+    def test_map_coords_attrs(self) -> None:
+        ds = xr.Dataset(
+            {
+                "a": (
+                    ["x", "y", "z"],
+                    np.arange(24).reshape(3, 4, 2),
+                    {"attr1": "value1"},
+                ),
+                "b": ("y", np.arange(4), {"attr2": "value2"}),
+            },
+            coords={
+                "x": ("x", np.array([-1, 0, 1]), {"attr3": "value3"}),
+                "z": ("z", list("ab"), {"attr4": "value4"}),
+            },
+        )
+
+        def func(arr):
+            if "y" not in arr.dims:
+                return arr
+
+            # drop attrs from coords
+            return arr.mean(dim="y").drop_attrs()
+
+        expected = ds.mean(dim="y", keep_attrs=True)
+        actual = ds.map(func, keep_attrs=True)
+
+        assert_identical(actual, expected)
+        assert actual["x"].attrs
+
+        ds["x"].attrs["y"] = "x"
+        assert ds["x"].attrs != actual["x"].attrs
+
     def test_apply_pending_deprecated_map(self) -> None:
         data = create_test_data()
         data.attrs["foo"] = "bar"
@@ -6089,7 +6414,7 @@ class TestDataset:
         assert_equal(actual, expected)
 
         actual = ds + ds[["bar"]]
-        expected = (2 * ds[["bar"]]).merge(ds.coords)
+        expected = (2 * ds[["bar"]]).merge(ds.coords, compat="override")
         assert_identical(expected, actual)
 
         assert_identical(ds + Dataset(), ds.coords.to_dataset())
@@ -6486,8 +6811,8 @@ class TestDataset:
 
         expected = ds.copy(deep=True)
         # https://github.com/python/mypy/issues/3004
-        expected["d1"].values = [2, 2, 2]  # type: ignore[assignment]
-        expected["d2"].values = [2.0, 2.0, 2.0]  # type: ignore[assignment]
+        expected["d1"].values = [2, 2, 2]  # type: ignore[assignment,unused-ignore]
+        expected["d2"].values = [2.0, 2.0, 2.0]  # type: ignore[assignment,unused-ignore]
         assert expected["d1"].dtype == int
         assert expected["d2"].dtype == float
         assert_identical(expected, actual)
@@ -6495,8 +6820,8 @@ class TestDataset:
         # override dtype
         actual = full_like(ds, fill_value=True, dtype=bool)
         expected = ds.copy(deep=True)
-        expected["d1"].values = [True, True, True]  # type: ignore[assignment]
-        expected["d2"].values = [True, True, True]  # type: ignore[assignment]
+        expected["d1"].values = [True, True, True]  # type: ignore[assignment,unused-ignore]
+        expected["d2"].values = [True, True, True]  # type: ignore[assignment,unused-ignore]
         assert expected["d1"].dtype == bool
         assert expected["d2"].dtype == bool
         assert_identical(expected, actual)
@@ -6525,12 +6850,12 @@ class TestDataset:
             coords={"x": ["a", "b", "c"]},
         )
         assert_equal(actual, expected)
-        assert_equal(actual, xr.merge([dsx0, dsx1]))
+        assert_equal(actual, xr.merge([dsx0, dsx1], join="outer"))
 
         # works just like xr.merge([self, other])
         dsy2 = DataArray([2, 2, 2], [("x", ["b", "c", "d"])]).to_dataset(name="dsy2")
         actual = dsx0.combine_first(dsy2)
-        expected = xr.merge([dsy2, dsx0])
+        expected = xr.merge([dsy2, dsx0], join="outer")
         assert_equal(actual, expected)
 
     def test_sortby(self) -> None:
@@ -6840,11 +7165,7 @@ class TestDataset:
                 if utils.is_dict_like(constant_values):
                     if (
                         expected := constant_values.get(data_var_name, None)
-                    ) is not None:
-                        self._test_data_var_interior(
-                            ds[data_var_name], data_var, padded_dim_name, expected
-                        )
-                    elif (
+                    ) is not None or (
                         expected := constant_values.get(padded_dim_name, None)
                     ) is not None:
                         self._test_data_var_interior(
@@ -7409,7 +7730,7 @@ def test_cumulative_integrate(dask) -> N
     from scipy.integrate import cumulative_trapezoid
 
     expected_x = xr.DataArray(
-        cumulative_trapezoid(da.compute(), da["x"], axis=0, initial=0.0),
+        cumulative_trapezoid(da.compute(), da["x"], axis=0, initial=0.0),  # type: ignore[call-overload,unused-ignore]
         dims=["x", "y"],
         coords=da.coords,
     )
@@ -7425,7 +7746,7 @@ def test_cumulative_integrate(dask) -> N
     # along y
     actual = da.cumulative_integrate("y")
     expected_y = xr.DataArray(
-        cumulative_trapezoid(da, da["y"], axis=1, initial=0.0),
+        cumulative_trapezoid(da, da["y"], axis=1, initial=0.0),  # type: ignore[call-overload,unused-ignore]
         dims=["x", "y"],
         coords=da.coords,
     )
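The cumulative_integrate hunks above only add type-ignore comments, but the relationship they exercise is worth spelling out: DataArray.cumulative_integrate matches scipy.integrate.cumulative_trapezoid with initial=0.0 along the integrated coordinate. A small sketch (assumes scipy is installed; the data here is illustrative):

    import numpy as np
    import xarray as xr
    from scipy.integrate import cumulative_trapezoid

    da = xr.DataArray(
        np.random.default_rng(0).random((5, 3)),
        dims=["x", "y"],
        coords={"x": 0.5 * np.arange(5)},
    )

    actual = da.cumulative_integrate("x")
    expected = cumulative_trapezoid(da.values, da["x"].values, axis=0, initial=0.0)
    np.testing.assert_allclose(actual.values, expected)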
diff -pruN 2025.03.1-8/xarray/tests/test_dataset_typing.yml 2025.10.1-1/xarray/tests/test_dataset_typing.yml
--- 2025.03.1-8/xarray/tests/test_dataset_typing.yml	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_dataset_typing.yml	2025-10-10 10:38:05.000000000 +0000
@@ -107,6 +107,8 @@
     main:7: note:     def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T
 
 - case: test_mypy_pipe_function_unexpected_keyword
+  skip: True # mypy 1.18.1 outputs "defined here" notes without line numbers that pytest-mypy-plugins can't parse
+  # See: https://github.com/python/mypy/issues/19257 (mypy issue about missing line numbers)
   main: |
     from xarray import Dataset
 
@@ -117,6 +119,7 @@
     ds = Dataset().pipe(f, 42, kw=99)
   out: |
     main:7: error: Unexpected keyword argument "kw" for "pipe" of "DataWithCoords"  [call-arg]
+    # Note: mypy 1.18.1 also outputs a "defined here" note that pytest-mypy-plugins can't parse
 
 - case: test_mypy_pipe_tuple_return_type_dataset
   main: |
diff -pruN 2025.03.1-8/xarray/tests/test_datatree.py 2025.10.1-1/xarray/tests/test_datatree.py
--- 2025.03.1-8/xarray/tests/test_datatree.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_datatree.py	2025-10-10 10:38:05.000000000 +0000
@@ -165,7 +165,9 @@ class TestPaths:
                 "/Kate": DataTree(),
             }
         )
-        assert john["/Mary"].same_tree(john["/Kate"])
+        mary = john.children["Mary"]
+        kate = john.children["Kate"]
+        assert mary.same_tree(kate)
 
     def test_relative_paths(self) -> None:
         john = DataTree.from_dict(
@@ -174,14 +176,8 @@ class TestPaths:
                 "/Annie": DataTree(),
             }
         )
-        sue_result = john["Mary/Sue"]
-        if isinstance(sue_result, DataTree):
-            sue: DataTree = sue_result
-
-        annie_result = john["Annie"]
-        if isinstance(annie_result, DataTree):
-            annie: DataTree = annie_result
-
+        sue = john.children["Mary"].children["Sue"]
+        annie = john.children["Annie"]
         assert sue.relative_to(john) == "Mary/Sue"
         assert john.relative_to(sue) == "../.."
         assert annie.relative_to(sue) == "../../Annie"
@@ -208,7 +204,7 @@ class TestStoreDatasets:
     def test_set_data(self) -> None:
         john = DataTree(name="john")
         dat = xr.Dataset({"a": 0})
-        john.dataset = dat  # type: ignore[assignment]
+        john.dataset = dat  # type: ignore[assignment,unused-ignore]
 
         assert_identical(john.to_dataset(), dat)
 
@@ -229,7 +225,7 @@ class TestStoreDatasets:
         eve = DataTree(children={"john": john})
         assert eve.is_hollow
 
-        eve.dataset = xr.Dataset({"a": 1})  # type: ignore[assignment]
+        eve.dataset = xr.Dataset({"a": 1})  # type: ignore[assignment,unused-ignore]
         assert not eve.is_hollow
 
 
@@ -266,13 +262,13 @@ class TestVariablesChildrenNameCollision
         )
 
         with pytest.raises(ValueError, match="node already contains a variable"):
-            dt.dataset = xr.Dataset({"a": 0})  # type: ignore[assignment]
+            dt.dataset = xr.Dataset({"a": 0})  # type: ignore[assignment,unused-ignore]
 
-        dt.dataset = xr.Dataset()  # type: ignore[assignment]
+        dt.dataset = xr.Dataset()  # type: ignore[assignment,unused-ignore]
 
         new_ds = dt.to_dataset().assign(a=xr.DataArray(0))
         with pytest.raises(ValueError, match="node already contains a variable"):
-            dt.dataset = new_ds  # type: ignore[assignment]
+            dt.dataset = new_ds  # type: ignore[assignment,unused-ignore]
 
 
 class TestGet: ...
@@ -531,7 +527,7 @@ class TestSetItem:
         john["mary"] = DataTree()
         assert_identical(john["mary"].to_dataset(), xr.Dataset())
 
-        john.dataset = xr.Dataset()  # type: ignore[assignment]
+        john.dataset = xr.Dataset()  # type: ignore[assignment,unused-ignore]
         with pytest.raises(ValueError, match="has no name"):
             john["."] = DataTree()
 
@@ -837,7 +833,7 @@ class TestTreeFromDict:
 
     def test_full(self, simple_datatree) -> None:
         dt = simple_datatree
-        paths = list(node.path for node in dt.subtree)
+        paths = [node.path for node in dt.subtree]
         assert paths == [
             "/",
             "/set1",
@@ -909,10 +905,91 @@ class TestTreeFromDict:
         # despite 'Bart' coming before 'Lisa' when sorted alphabetically
         assert list(reversed["Homer"].children.keys()) == ["Lisa", "Bart"]
 
-    def test_array_values(self) -> None:
+    def test_array_values_dataarray(self) -> None:
+        expected = DataTree(dataset=Dataset({"a": 1}))
+        actual = DataTree.from_dict({"a": DataArray(1)})
+        assert_identical(actual, expected)
+
+    def test_array_values_scalars(self) -> None:
+        expected = DataTree(
+            dataset=Dataset({"a": 1}),
+            children={"b": DataTree(Dataset({"c": 2, "d": 3}))},
+        )
+        actual = DataTree.from_dict({"a": 1, "b/c": 2, "b/d": 3})
+        assert_identical(actual, expected)
+
+    def test_invalid_values(self) -> None:
+        with pytest.raises(
+            TypeError,
+            match=re.escape(
+                r"failed to construct xarray.Dataset for DataTree node at '/' "
+                r"with data_vars={'a': set()} and coords={}"
+            ),
+        ):
+            DataTree.from_dict({"a": set()})
+
+    def test_array_values_nested_key(self) -> None:
+        expected = DataTree(
+            children={"a": DataTree(children={"b": DataTree(Dataset({"c": 1}))})}
+        )
+        actual = DataTree.from_dict(data={"a/b/c": 1})
+        assert_identical(actual, expected)
+
+    def test_nested_array_values(self) -> None:
+        expected = DataTree(
+            children={"a": DataTree(children={"b": DataTree(Dataset({"c": 1}))})}
+        )
+        actual = DataTree.from_dict({"a": {"b": {"c": 1}}}, nested=True)
+        assert_identical(actual, expected)
+
+    def test_nested_array_values_without_nested_kwarg(self) -> None:
+        with pytest.raises(
+            TypeError,
+            match=re.escape(
+                r"data contains a dict value at key='a', which is not a valid "
+                r"argument to DataTree.from_dict() with nested=False: "
+                r"{'b': {'c': 1}}"
+            ),
+        ):
+            DataTree.from_dict({"a": {"b": {"c": 1}}})
+
+    def test_nested_array_values_duplicates(self) -> None:
+        with pytest.raises(
+            ValueError,
+            match=re.escape("multiple entries found corresponding to node '/a/b'"),
+        ):
+            DataTree.from_dict({"a": {"b": 1}, "a/b": 2}, nested=True)
+
+    def test_array_values_data_and_coords(self) -> None:
+        expected = DataTree(dataset=Dataset({"a": 1}, coords={"b": 2}))
+        actual = DataTree.from_dict(data={"a": 1}, coords={"b": 2})
+        assert_identical(actual, expected)
+
+    def test_data_and_coords_conflicting(self) -> None:
+        with pytest.raises(
+            ValueError,
+            match=re.escape("multiple entries found corresponding to node '/a'"),
+        ):
+            DataTree.from_dict(data={"a": 1}, coords={"a": 2})
+
+    def test_array_values_new_name(self) -> None:
+        expected = DataTree(dataset=Dataset({"foo": 1}))
         data = {"foo": xr.DataArray(1, name="bar")}
-        with pytest.raises(TypeError):
-            DataTree.from_dict(data)  # type: ignore[arg-type]
+        actual = DataTree.from_dict(data)
+        assert_identical(actual, expected)
+
+    def test_array_values_at_root(self) -> None:
+        with pytest.raises(ValueError, match="cannot set DataArray value at root"):
+            DataTree.from_dict({"/": 1})
+
+    def test_array_values_parent_node_also_set(self) -> None:
+        with pytest.raises(
+            ValueError,
+            match=re.escape(
+                r"cannot set DataArray value at '/a' when parent node at '/' is also set"
+            ),
+        ):
+            DataTree.from_dict({"/": Dataset(), "/a": 1})
 
     def test_relative_paths(self) -> None:
         tree = DataTree.from_dict({".": None, "foo": None, "./bar": None, "x/y": None})
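The block of new from_dict tests above covers several constructor conveniences. A compact sketch of the calls they exercise (assumes xarray 2025.10 or newer; older versions raise TypeError for non-Dataset values):

    import xarray as xr

    # scalar and DataArray leaf values, assigned to nodes by path
    flat = xr.DataTree.from_dict({"a": 1, "b/c": 2, "b/d": 3})

    # nested dicts instead of path keys, opted into with nested=True
    nested = xr.DataTree.from_dict({"a": {"b": {"c": 1}}}, nested=True)

    # a separate coords mapping alongside data
    with_coords = xr.DataTree.from_dict(data={"a": 1}, coords={"b": 2})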
@@ -941,10 +1018,16 @@ class TestTreeFromDict:
         actual = DataTree.from_dict({"./": ds})
         assert_identical(actual, expected)
 
+    def test_multiple_entries(self):
         with pytest.raises(
-            ValueError, match="multiple entries found corresponding to the root node"
+            ValueError, match="multiple entries found corresponding to node '/'"
         ):
-            DataTree.from_dict({"": ds, "/": ds})
+            DataTree.from_dict({"": None, ".": None})
+
+        with pytest.raises(
+            ValueError, match="multiple entries found corresponding to node '/a'"
+        ):
+            DataTree.from_dict({"a": None, "/a": None})
 
     def test_name(self):
         tree = DataTree.from_dict({"/": None}, name="foo")
@@ -1019,6 +1102,29 @@ class TestDatasetView:
 
         weighted_mean(dt.dataset)
 
+    def test_map_keep_attrs(self) -> None:
+        # test DatasetView.map(..., keep_attrs=...)
+        data = xr.DataArray([1, 2, 3], dims="x", attrs={"da": "attrs"})
+        ds = xr.Dataset({"data": data}, attrs={"ds": "attrs"})
+        dt = DataTree(ds)
+
+        def func_keep(ds):
+            # x.mean() removes the attrs of the data_vars
+            return ds.map(lambda x: x.mean(), keep_attrs=True)
+
+        result = xr.map_over_datasets(func_keep, dt)
+        expected = dt.mean(keep_attrs=True)
+        xr.testing.assert_identical(result, expected)
+
+        # per default DatasetView.map does not keep attrs
+        # by default, DatasetView.map does not keep attrs
+        def func(ds):
+            # x.mean() removes the attrs of the data_vars
+            return ds.map(lambda x: x.mean())
+
+        result = xr.map_over_datasets(func, dt)
+        expected = dt.mean()
+        xr.testing.assert_identical(result, expected.mean())
+
 
 class TestAccess:
     def test_attribute_access(self, create_test_datatree) -> None:
@@ -1053,7 +1159,7 @@ class TestAccess:
         var_keys = list(dt.variables.keys())
         assert all(var_key in key_completions for var_key in var_keys)
 
-    def test_ipython_key_completitions_subnode(self) -> None:
+    def test_ipython_key_completions_subnode(self) -> None:
         tree = xr.DataTree.from_dict({"/": None, "/a": None, "/a/b/": None})
         expected = ["b"]
         actual = tree["a"]._ipython_key_completions_()
@@ -1196,6 +1302,76 @@ class TestRepr:
         ).strip()
         assert result == expected
 
+    def test_repr_truncates_nodes(self) -> None:
+        # construct a datatree with 50 nodes
+        number_of_files = 10
+        number_of_groups = 5
+        tree_dict = {}
+        for f in range(number_of_files):
+            for g in range(number_of_groups):
+                tree_dict[f"file_{f}/group_{g}"] = Dataset({"g": f * g})
+
+        tree = DataTree.from_dict(tree_dict)
+        with xr.set_options(display_max_children=3):
+            result = repr(tree)
+
+        expected = dedent(
+            """
+            <xarray.DataTree>
+            Group: /
+            ├── Group: /file_0
+            │   ├── Group: /file_0/group_0
+            │   │       Dimensions:  ()
+            │   │       Data variables:
+            │   │           g        int64 8B 0
+            │   ├── Group: /file_0/group_1
+            │   │       Dimensions:  ()
+            │   │       Data variables:
+            │   │           g        int64 8B 0
+            │   ...
+            │   └── Group: /file_0/group_4
+            │           Dimensions:  ()
+            │           Data variables:
+            │               g        int64 8B 0
+            ├── Group: /file_1
+            │   ├── Group: /file_1/group_0
+            │   │       Dimensions:  ()
+            │   │       Data variables:
+            │   │           g        int64 8B 0
+            │   ├── Group: /file_1/group_1
+            │   │       Dimensions:  ()
+            │   │       Data variables:
+            │   │           g        int64 8B 1
+            │   ...
+            │   └── Group: /file_1/group_4
+            │           Dimensions:  ()
+            │           Data variables:
+            │               g        int64 8B 4
+            ...
+            └── Group: /file_9
+                ├── Group: /file_9/group_0
+                │       Dimensions:  ()
+                │       Data variables:
+                │           g        int64 8B 0
+                ├── Group: /file_9/group_1
+                │       Dimensions:  ()
+                │       Data variables:
+                │           g        int64 8B 9
+                ...
+                └── Group: /file_9/group_4
+                        Dimensions:  ()
+                        Data variables:
+                            g        int64 8B 36
+            """
+        ).strip()
+        assert expected == result
+
+        with xr.set_options(display_max_children=10):
+            result = repr(tree)
+
+        for key in tree_dict:
+            assert key in result
+
     def test_repr_inherited_dims(self) -> None:
         tree = DataTree.from_dict(
             {
@@ -1340,7 +1516,6 @@ class TestRepr:
 
 def _exact_match(message: str) -> str:
     return re.escape(dedent(message).strip())
-    return "^" + re.escape(dedent(message.rstrip())) + "$"
 
 
 class TestInheritance:
@@ -1480,7 +1655,7 @@ class TestInheritance:
             )
 
         dt = DataTree()
-        dt.dataset = xr.Dataset(coords={"x": [1.0]})  # type: ignore[assignment]
+        dt.dataset = xr.Dataset(coords={"x": [1.0]})  # type: ignore[assignment,unused-ignore]
         dt["/b"] = DataTree()
         with pytest.raises(ValueError, match=expected_msg):
             dt["/b"].dataset = xr.Dataset(coords={"x": [2.0]})
@@ -1515,7 +1690,7 @@ class TestInheritance:
             )
 
         dt = DataTree()
-        dt.dataset = xr.Dataset(coords={"x": [1.0]})  # type: ignore[assignment]
+        dt.dataset = xr.Dataset(coords={"x": [1.0]})  # type: ignore[assignment,unused-ignore]
         dt["/b/c"] = DataTree()
         with pytest.raises(ValueError, match=expected_msg):
             dt["/b/c"].dataset = xr.Dataset(coords={"x": [2.0]})
@@ -1564,11 +1739,11 @@ class TestRestructuring:
 
         # test drop multiple nodes
         dropped = sue.drop_nodes(names=["Mary", "Kate"])
-        assert not set(["Mary", "Kate"]).intersection(set(dropped.children))
+        assert not {"Mary", "Kate"}.intersection(set(dropped.children))
         assert "Ashley" in dropped.children
 
         # test raise
-        with pytest.raises(KeyError, match="nodes {'Mary'} not present"):
+        with pytest.raises(KeyError, match=r"nodes {'Mary'} not present"):
             dropped.drop_nodes(names=["Mary", "Ashley"])
 
         # test ignore
@@ -1792,7 +1967,7 @@ class TestIsomorphicEqualsAndIdentical:
         assert not child.identical(new_child)
 
         deeper_root = DataTree(children={"root": root})
-        grandchild = deeper_root["/root/child"]
+        grandchild = deeper_root.children["root"].children["child"]
         assert child.equals(grandchild)
         assert child.identical(grandchild)
 
@@ -1850,6 +2025,85 @@ class TestSubset:
         )
         assert_identical(actual, expected)
 
+    def test_prune_basic(self) -> None:
+        tree = DataTree.from_dict(
+            {"/a": xr.Dataset({"foo": ("x", [1, 2])}), "/b": xr.Dataset()}
+        )
+
+        pruned = tree.prune()
+
+        assert "a" in pruned.children
+        assert "b" not in pruned.children
+        assert_identical(
+            pruned.children["a"].to_dataset(), tree.children["a"].to_dataset()
+        )
+
+    def test_prune_with_zero_size_vars(self) -> None:
+        tree = DataTree.from_dict(
+            {
+                "/a": xr.Dataset({"foo": ("x", [1, 2])}),
+                "/b": xr.Dataset({"empty": ("dim", [])}),
+                "/c": xr.Dataset(),
+            }
+        )
+
+        pruned_default = tree.prune()
+        expected_default = DataTree.from_dict(
+            {
+                "/a": xr.Dataset({"foo": ("x", [1, 2])}),
+                "/b": xr.Dataset({"empty": ("dim", [])}),
+            }
+        )
+        assert_identical(pruned_default, expected_default)
+
+        pruned_strict = tree.prune(drop_size_zero_vars=True)
+        expected_strict = DataTree.from_dict(
+            {
+                "/a": xr.Dataset({"foo": ("x", [1, 2])}),
+            }
+        )
+        assert_identical(pruned_strict, expected_strict)
+
+    def test_prune_with_intermediate_nodes(self) -> None:
+        tree = DataTree.from_dict(
+            {
+                "/": xr.Dataset(),
+                "/group1": xr.Dataset(),
+                "/group1/subA": xr.Dataset({"temp": ("x", [1, 2])}),
+                "/group1/subB": xr.Dataset(),
+                "/group2": xr.Dataset({"empty": ("dim", [])}),
+            }
+        )
+        pruned = tree.prune()
+        expected_tree = DataTree.from_dict(
+            {
+                "/group1/subA": xr.Dataset({"temp": ("x", [1, 2])}),
+                "/group2": xr.Dataset({"empty": ("dim", [])}),
+            }
+        )
+        assert_identical(pruned, expected_tree)
+
+    def test_prune_after_filtering(self) -> None:
+        from pandas import date_range
+
+        ds1 = xr.Dataset(
+            {"foo": ("time", [1, 2, 3, 4, 5])},
+            coords={"time": date_range("2023-01-01", periods=5, freq="D")},
+        )
+        ds2 = xr.Dataset(
+            {"var": ("time", [1, 2, 3, 4, 5])},
+            coords={"time": date_range("2023-01-04", periods=5, freq="D")},
+        )
+
+        tree = DataTree.from_dict({"a": ds1, "b": ds2})
+        filtered = tree.sel(time=slice("2023-01-01", "2023-01-03"))
+
+        pruned = filtered.prune(drop_size_zero_vars=True)
+        expected_tree = DataTree.from_dict(
+            {"a": ds1.sel(time=slice("2023-01-01", "2023-01-03"))}
+        )
+        assert_identical(pruned, expected_tree)
+
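A short sketch of the prune() behaviour these new tests pin down (assumes the DataTree.prune API introduced here; drop_size_zero_vars defaults to False):

    import xarray as xr

    tree = xr.DataTree.from_dict(
        {
            "/a": xr.Dataset({"foo": ("x", [1, 2])}),
            "/b": xr.Dataset({"empty": ("dim", [])}),
            "/c": xr.Dataset(),  # empty node, always pruned
        }
    )

    print(tree.prune())                          # keeps /a and /b
    print(tree.prune(drop_size_zero_vars=True))  # keeps only /a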
 
 class TestIndexing:
     def test_isel_siblings(self) -> None:
@@ -2217,7 +2471,7 @@ class TestUFuncs:
     @pytest.mark.xfail(reason="__array_ufunc__ not implemented yet")
     def test_tree(self, create_test_datatree):
         dt = create_test_datatree()
-        expected = create_test_datatree(modify=lambda ds: np.sin(ds))
+        expected = create_test_datatree(modify=np.sin)
         result_tree = np.sin(dt)
         assert_equal(result_tree, expected)
 
@@ -2232,7 +2486,7 @@ class Closer:
         self.closed = True
 
 
-@pytest.fixture()
+@pytest.fixture
 def tree_and_closers():
     tree = DataTree.from_dict({"/child/grandchild": None})
     closers = {
diff -pruN 2025.03.1-8/xarray/tests/test_datatree_mapping.py 2025.10.1-1/xarray/tests/test_datatree_mapping.py
--- 2025.03.1-8/xarray/tests/test_datatree_mapping.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_datatree_mapping.py	2025-10-10 10:38:05.000000000 +0000
@@ -32,7 +32,7 @@ class TestMapOverSubTree:
     def test_no_trees_returned(self, create_test_datatree):
         dt1 = create_test_datatree()
         dt2 = create_test_datatree()
-        expected = xr.DataTree.from_dict({k: None for k in dt1.to_dict()})
+        expected = xr.DataTree.from_dict(dict.fromkeys(dt1.to_dict()))
         actual = map_over_datasets(lambda x, y: None, dt1, dt2)
         assert_equal(expected, actual)
 
diff -pruN 2025.03.1-8/xarray/tests/test_datatree_typing.yml 2025.10.1-1/xarray/tests/test_datatree_typing.yml
--- 2025.03.1-8/xarray/tests/test_datatree_typing.yml	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_datatree_typing.yml	2025-10-10 10:38:05.000000000 +0000
@@ -107,6 +107,9 @@
     main:7: note:     def [T] pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T
 
 - case: test_mypy_pipe_function_unexpected_keyword
+  skip: True # mypy 1.18+ outputs "defined here" notes without line numbers (e.g., "xarray/core/datatree.py: note:...")
+  # pytest-mypy-plugins expects all lines to match "file:line: severity: message" format and can't parse these notes.
+  # This is a mypy behavior, not a bug. The test would need pytest-mypy-plugins to support notes without line numbers.
   main: |
     from xarray import DataTree
 
@@ -117,6 +120,7 @@
     dt = DataTree().pipe(f, 42, kw=99)
   out: |
     main:7: error: Unexpected keyword argument "kw" for "pipe" of "DataTree"  [call-arg]
+    # Note: mypy 1.18.1 also outputs a "defined here" note that pytest-mypy-plugins can't parse
 
 - case: test_mypy_pipe_tuple_return_type_datatree
   main: |
diff -pruN 2025.03.1-8/xarray/tests/test_distributed.py 2025.10.1-1/xarray/tests/test_distributed.py
--- 2025.03.1-8/xarray/tests/test_distributed.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_distributed.py	2025-10-10 10:38:05.000000000 +0000
@@ -17,16 +17,18 @@ else:
     da = pytest.importorskip("dask.array")
     distributed = pytest.importorskip("distributed")
 
+import contextlib
+
 from dask.distributed import Client, Lock
 from distributed.client import futures_of
-from distributed.utils_test import (  # noqa: F401
-    cleanup,
-    client,
+from distributed.utils_test import (
+    cleanup,  # noqa: F401
+    client,  # noqa: F401
     cluster,
-    cluster_fixture,
+    cluster_fixture,  # noqa: F401
     gen_cluster,
-    loop,
-    loop_in_thread,
+    loop,  # noqa: F401
+    loop_in_thread,  # noqa: F401
 )
 
 import xarray as xr
@@ -47,9 +49,6 @@ from xarray.tests.test_backends import (
 )
 from xarray.tests.test_dataset import create_test_data
 
-loop = loop  # loop is an imported fixture, which flake8 has issues ack-ing
-client = client  # client is an imported fixture, which flake8 has issues ack-ing
-
 
 @pytest.fixture
 def tmp_netcdf_filename(tmpdir):
@@ -86,15 +85,20 @@ ENGINES_AND_FORMATS = [
 
 
 @pytest.mark.parametrize("engine,nc_format", ENGINES_AND_FORMATS)
+@pytest.mark.parametrize("compute", [True, False])
 def test_dask_distributed_netcdf_roundtrip(
-    loop, tmp_netcdf_filename, engine, nc_format
+    loop,  # noqa: F811
+    tmp_netcdf_filename,
+    engine,
+    nc_format,
+    compute,
 ):
     if engine not in ENGINES:
         pytest.skip("engine not available")
 
     chunks = {"dim1": 4, "dim2": 3, "dim3": 6}
 
-    with cluster() as (s, [a, b]):
+    with cluster() as (s, [_a, _b]):
         with Client(s["address"], loop=loop):
             original = create_test_data().chunk(chunks)
 
@@ -105,7 +109,11 @@ def test_dask_distributed_netcdf_roundtr
                     )
                 return
 
-            original.to_netcdf(tmp_netcdf_filename, engine=engine, format=nc_format)
+            result = original.to_netcdf(
+                tmp_netcdf_filename, engine=engine, format=nc_format, compute=compute
+            )
+            if not compute:
+                result.compute()
 
             with xr.open_dataset(
                 tmp_netcdf_filename, chunks=chunks, engine=engine
@@ -117,9 +125,10 @@ def test_dask_distributed_netcdf_roundtr
 
 @requires_netCDF4
 def test_dask_distributed_write_netcdf_with_dimensionless_variables(
-    loop, tmp_netcdf_filename
+    loop,  # noqa: F811
+    tmp_netcdf_filename,
 ):
-    with cluster() as (s, [a, b]):
+    with cluster() as (s, [_a, _b]):
         with Client(s["address"], loop=loop):
             original = xr.Dataset({"x": da.zeros(())})
             original.to_netcdf(tmp_netcdf_filename)
@@ -138,7 +147,7 @@ def test_open_mfdataset_can_open_files_w
     da = xr.DataArray(data, coords={"time": T, "Lon": Lon}, name="test")
     file_path = tmp_path / "test.nc"
     da.to_netcdf(file_path)
-    with cluster() as (s, [a, b]):
+    with cluster() as (s, [_a, _b]):
         with Client(s["address"]):
             with xr.open_mfdataset(file_path, parallel=parallel) as tf:
                 assert_identical(tf["test"], da)
@@ -159,7 +168,7 @@ def test_open_mfdataset_multiple_files_p
         da.isel(time=slice(i, i + 10)).to_netcdf(fname)
         fnames.append(fname)
 
-    with cluster() as (s, [a, b]):
+    with cluster() as (s, [_a, _b]):
         with Client(s["address"]):
             with xr.open_mfdataset(
                 fnames, parallel=parallel, concat_dim="time", combine="nested"
@@ -197,14 +206,17 @@ def test_open_mfdataset_multiple_files_p
 
 @pytest.mark.parametrize("engine,nc_format", ENGINES_AND_FORMATS)
 def test_dask_distributed_read_netcdf_integration_test(
-    loop, tmp_netcdf_filename, engine, nc_format
+    loop,  # noqa: F811
+    tmp_netcdf_filename,
+    engine,
+    nc_format,
 ):
     if engine not in ENGINES:
         pytest.skip("engine not available")
 
     chunks = {"dim1": 4, "dim2": 3, "dim3": 6}
 
-    with cluster() as (s, [a, b]):
+    with cluster() as (s, [_a, _b]):
         with Client(s["address"], loop=loop):
             original = create_test_data()
             original.to_netcdf(tmp_netcdf_filename, engine=engine, format=nc_format)
@@ -220,8 +232,8 @@ def test_dask_distributed_read_netcdf_in
 # fixture vendored from dask
 # heads-up, this is using quite private zarr API
 # https://github.com/dask/dask/blob/e04734b4d8959ba259801f2e2a490cb4ee8d891f/dask/tests/test_distributed.py#L338-L358
-@pytest.fixture(scope="function")
-def zarr(client):
+@pytest.fixture
+def zarr(client):  # noqa: F811
     zarr_lib = pytest.importorskip("zarr")
     # Zarr-Python 3 lazily allocates a dedicated thread/IO loop
     # to execute async tasks. To avoid having this thread
@@ -238,17 +250,15 @@ def zarr(client):
         # an IO loop. Here we clean up these resources to avoid leaking threads
         # In normal operations, this is done by an atexit handler when Zarr
         # is shutting down.
-        try:
+        with contextlib.suppress(AttributeError):
             zarr_lib.core.sync.cleanup_resources()
-        except AttributeError:
-            pass
 
 
 @requires_zarr
 @pytest.mark.parametrize("consolidated", [True, False])
 @pytest.mark.parametrize("compute", [True, False])
 def test_dask_distributed_zarr_integration_test(
-    client,
+    client,  # noqa: F811
     zarr,
     consolidated: bool,
     compute: bool,
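The new compute parametrization above exercises xarray's delayed-write path. A sketch of that pattern outside the distributed fixtures (assumes dask and a netCDF backend are installed; the temporary path is illustrative):

    import os
    import tempfile

    import xarray as xr

    ds = xr.Dataset({"a": ("x", list(range(6)))}).chunk({"x": 3})
    path = os.path.join(tempfile.mkdtemp(), "roundtrip.nc")

    delayed = ds.to_netcdf(path, compute=False)  # returns a dask Delayed object
    delayed.compute()                            # the actual write happens here

    print(xr.open_dataset(path))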
diff -pruN 2025.03.1-8/xarray/tests/test_duck_array_ops.py 2025.10.1-1/xarray/tests/test_duck_array_ops.py
--- 2025.03.1-8/xarray/tests/test_duck_array_ops.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_duck_array_ops.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,7 +1,10 @@
 from __future__ import annotations
 
+import copy
 import datetime as dt
+import pickle
 import warnings
+from typing import Any
 
 import numpy as np
 import pandas as pd
@@ -62,13 +65,13 @@ try:
 
     @pytest.fixture
     def arrow1():
-        return pd.arrays.ArrowExtensionArray(
+        return pd.arrays.ArrowExtensionArray(  # type: ignore[attr-defined]
             pa.array([{"x": 1, "y": True}, {"x": 2, "y": False}])
         )
 
     @pytest.fixture
     def arrow2():
-        return pd.arrays.ArrowExtensionArray(
+        return pd.arrays.ArrowExtensionArray(  # type: ignore[attr-defined]
             pa.array([{"x": 3, "y": False}, {"x": 4, "y": True}])
         )
 
@@ -196,8 +199,18 @@ class TestOps:
         concatenated = concatenate(
             (PandasExtensionArray(arrow1), PandasExtensionArray(arrow2))
         )
-        assert concatenated[2]["x"] == 3
-        assert concatenated[3]["y"]
+        assert concatenated[2].array[0]["x"] == 3
+        assert concatenated[3].array[0]["y"]
+
+    @requires_pyarrow
+    def test_extension_array_copy_arrow_type(self):
+        arr = pd.array([pd.NA, 1, 2], dtype="int64[pyarrow]")
+        # Relying on the `__getattr__` of `PandasExtensionArray` to do the deep copy
+        # recursively only fails for `int64[pyarrow]` and similar types so this
+        # test ensures that copying still works there.
+        assert isinstance(
+            copy.deepcopy(PandasExtensionArray(arr), memo=None).array, type(arr)
+        )
 
     def test___getitem__extension_duck_array(self, categorical1):
         extension_duck_array = PandasExtensionArray(categorical1)
@@ -367,7 +380,7 @@ def construct_dataarray(dim_num, dtype,
     da = DataArray(array, dims=dims, coords={"x": np.arange(16)}, name="da")
 
     if dask and has_dask:
-        chunks = {d: 4 for d in dims}
+        chunks = dict.fromkeys(dims, 4)
         da = da.chunk(chunks)
 
     return da
@@ -575,7 +588,7 @@ def test_reduce(dim_num, dtype, dask, fu
     if dask and not has_dask:
         pytest.skip("requires dask")
 
-    if dask and skipna is False and dtype in [np.bool_]:
+    if dask and skipna is False and dtype == np.bool_:
         pytest.skip("dask does not compute object-typed array")
 
     rtol = 1e-04 if dtype == np.float32 else 1e-05
@@ -928,8 +941,8 @@ def test_datetime_to_numeric_cftime(dask
         result = duck_array_ops.datetime_to_numeric(
             times, datetime_unit="h", dtype=dtype
         )
-    expected = 24 * np.arange(0, 35, 7).astype(dtype)
-    np.testing.assert_array_equal(result, expected)
+    expected2: Any = 24 * np.arange(0, 35, 7).astype(dtype)
+    np.testing.assert_array_equal(result, expected2)
 
     with raise_if_dask_computes():
         if dask:
@@ -939,8 +952,8 @@ def test_datetime_to_numeric_cftime(dask
         result = duck_array_ops.datetime_to_numeric(
             time, offset=times[0], datetime_unit="h", dtype=int
         )
-    expected = np.array(24 * 7).astype(int)
-    np.testing.assert_array_equal(result, expected)
+    expected3 = np.array(24 * 7).astype(int)
+    np.testing.assert_array_equal(result, expected3)
 
 
 @requires_cftime
@@ -1096,6 +1109,17 @@ def test_extension_array_repr(int1):
     assert repr(int1) in repr(int_duck_array)
 
 
-def test_extension_array_attr(int1):
-    int_duck_array = PandasExtensionArray(int1)
-    assert (~int_duck_array.fillna(10)).all()
+def test_extension_array_attr():
+    array = pd.Categorical(["cat2", "cat1", "cat2", "cat3", "cat1"])
+    wrapped = PandasExtensionArray(array)
+    assert_array_equal(array.categories, wrapped.categories)
+    assert array.nbytes == wrapped.nbytes
+
+    roundtripped = pickle.loads(pickle.dumps(wrapped))
+    assert isinstance(roundtripped, PandasExtensionArray)
+    assert (roundtripped == wrapped).all()
+
+    interval_array = pd.arrays.IntervalArray.from_breaks([0, 1, 2, 3], closed="right")
+    wrapped = PandasExtensionArray(interval_array)
+    assert_array_equal(wrapped.left, interval_array.left, strict=True)
+    assert wrapped.closed == interval_array.closed
diff -pruN 2025.03.1-8/xarray/tests/test_duck_array_wrapping.py 2025.10.1-1/xarray/tests/test_duck_array_wrapping.py
--- 2025.03.1-8/xarray/tests/test_duck_array_wrapping.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_duck_array_wrapping.py	2025-10-10 10:38:05.000000000 +0000
@@ -101,6 +101,14 @@ NAMESPACE_ARRAYS = {
     },
 }
 
+try:
+    import jax  # type: ignore[import-not-found,unused-ignore]
+
+    # enable double-precision
+    jax.config.update("jax_enable_x64", True)
+except ImportError:
+    pass
+
 
 class _BaseTest:
     def setup_for_test(self, request, namespace):
@@ -155,7 +163,7 @@ class TestTopLevelMethods(_BaseTest):
         assert isinstance(result.data, self.Array)
 
     def test_merge(self):
-        result = xr.merge([self.x1, self.x2], compat="override")
+        result = xr.merge([self.x1, self.x2], compat="override", join="outer")
         assert isinstance(result.foo.data, self.Array)
 
     def test_where(self):
diff -pruN 2025.03.1-8/xarray/tests/test_formatting.py 2025.10.1-1/xarray/tests/test_formatting.py
--- 2025.03.1-8/xarray/tests/test_formatting.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_formatting.py	2025-10-10 10:38:05.000000000 +0000
@@ -101,6 +101,9 @@ class TestFormatting:
             (np.float16(1.1234), "1.123"),
             (np.float32(1.0111111), "1.011"),
             (np.float64(22.222222), "22.22"),
+            (np.zeros((1, 1)), "[[0.]]"),
+            (np.zeros(2), "[0. 0.]"),
+            (np.zeros((2, 2)), "[[0. 0.]\n [0. 0.]]"),
         ]
         for item, expected in cases:
             actual = formatting.format_item(item)
@@ -718,6 +721,27 @@ class TestFormatting:
         actual = formatting.diff_datatree_repr(dt_1, dt_2, "identical")
         assert actual == expected
 
+    def test_diff_datatree_repr_equals(self) -> None:
+        ds1 = xr.Dataset(data_vars={"data": ("y", [5, 2])})
+        ds2 = xr.Dataset(data_vars={"data": (("x", "y"), [[5, 2]])})
+        dt1 = xr.DataTree.from_dict({"node": ds1})
+        dt2 = xr.DataTree.from_dict({"node": ds2})
+
+        expected = dedent(
+            """\
+            Left and right DataTree objects are not equal
+
+            Data at node 'node' does not match:
+                Differing dimensions:
+                    (y: 2) != (x: 1, y: 2)
+                Differing data variables:
+                L   data     (y) int64 16B 5 2
+                R   data     (x, y) int64 16B 5 2"""
+        )
+
+        actual = formatting.diff_datatree_repr(dt1, dt2, "equals")
+        assert actual == expected
+
 
 def test_inline_variable_array_repr_custom_repr() -> None:
     class CustomArray:
@@ -833,7 +857,7 @@ def test__mapping_repr(display_max_rows,
     a = np.char.add(long_name, np.arange(0, n_vars).astype(str))
     b = np.char.add("attr_", np.arange(0, n_attr).astype(str))
     c = np.char.add("coord", np.arange(0, n_vars).astype(str))
-    attrs = {k: 2 for k in b}
+    attrs = dict.fromkeys(b, 2)
     coords = {_c: np.array([0, 1], dtype=np.uint64) for _c in c}
     data_vars = dict()
     for v, _c in zip(a, coords.items(), strict=True):
@@ -939,7 +963,11 @@ def test_lazy_array_wont_compute() -> No
 
     class LazilyIndexedArrayNotComputable(LazilyIndexedArray):
         def __array__(
-            self, dtype: np.typing.DTypeLike = None, /, *, copy: bool | None = None
+            self,
+            dtype: np.typing.DTypeLike | None = None,
+            /,
+            *,
+            copy: bool | None = None,
         ) -> np.ndarray:
             raise NotImplementedError("Computing this array is not possible.")
 
@@ -1165,3 +1193,46 @@ Dimensions without coordinates: x
 Dimensions without coordinates: x
         """.strip()
     assert actual == expected
+
+
+def test_repr_pandas_range_index() -> None:
+    # lazy data repr but values shown in inline repr
+    xidx = xr.indexes.PandasIndex(pd.RangeIndex(10), "x")
+    ds = xr.Dataset(coords=xr.Coordinates.from_xindex(xidx))
+    actual = repr(ds.x)
+    expected = """
+<xarray.DataArray 'x' (x: 10)> Size: 80B
+[10 values with dtype=int64]
+Coordinates:
+  * x        (x) int64 80B 0 1 2 3 4 5 6 7 8 9
+    """.strip()
+    assert actual == expected
+
+
+def test_repr_pandas_multi_index() -> None:
+    # lazy data repr but values shown in inline repr
+    midx = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=["foo", "bar"])
+    coords = xr.Coordinates.from_pandas_multiindex(midx, "x")
+    ds = xr.Dataset(coords=coords)
+
+    actual = repr(ds.x)
+    expected = """
+<xarray.DataArray 'x' (x: 4)> Size: 32B
+[4 values with dtype=object]
+Coordinates:
+  * x        (x) object 32B MultiIndex
+  * foo      (x) object 32B 'a' 'a' 'b' 'b'
+  * bar      (x) int64 32B 1 2 1 2
+    """.strip()
+    assert actual == expected
+
+    actual = repr(ds.foo)
+    expected = """
+<xarray.DataArray 'foo' (x: 4)> Size: 32B
+[4 values with dtype=object]
+Coordinates:
+  * x        (x) object 32B MultiIndex
+  * foo      (x) object 32B 'a' 'a' 'b' 'b'
+  * bar      (x) int64 32B 1 2 1 2
+    """.strip()
+    assert actual == expected
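The new repr tests above build coordinates directly from an existing index object. A sketch of that construction, using xr.indexes.PandasIndex and xr.Coordinates.from_xindex as the tests do (assumes a recent xarray):

    import pandas as pd
    import xarray as xr

    xidx = xr.indexes.PandasIndex(pd.RangeIndex(10), "x")
    ds = xr.Dataset(coords=xr.Coordinates.from_xindex(xidx))

    # the underlying data stays lazy, but the coordinate line shows the values
    print(ds.x)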
diff -pruN 2025.03.1-8/xarray/tests/test_formatting_html.py 2025.10.1-1/xarray/tests/test_formatting_html.py
--- 2025.03.1-8/xarray/tests/test_formatting_html.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_formatting_html.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,5 +1,8 @@
 from __future__ import annotations
 
+import re
+from functools import partial
+
 import numpy as np
 import pandas as pd
 import pytest
@@ -9,6 +12,66 @@ from xarray.core import formatting_html
 from xarray.core.coordinates import Coordinates
 
 
+def drop_fallback_text_repr(html: str) -> str:
+    pattern = (
+        re.escape("<pre class='xr-text-repr-fallback'>") + "[^<]*" + re.escape("</pre>")
+    )
+    return re.sub(pattern, "", html)
+
+
+XarrayTypes = xr.DataTree | xr.Dataset | xr.DataArray | xr.Variable
+
+
+def xarray_html_only_repr(obj: XarrayTypes) -> str:
+    return drop_fallback_text_repr(obj._repr_html_())
+
+
+def assert_consistent_text_and_html(
+    obj: XarrayTypes, section_headers: list[str]
+) -> None:
+    actual_html = xarray_html_only_repr(obj)
+    actual_text = repr(obj)
+    for section_header in section_headers:
+        assert actual_html.count(section_header) == actual_text.count(section_header), (
+            section_header
+        )
+
+
+assert_consistent_text_and_html_dataarray = partial(
+    assert_consistent_text_and_html,
+    section_headers=[
+        "Coordinates",
+        "Indexes",
+        "Attributes",
+    ],
+)
+
+
+assert_consistent_text_and_html_dataset = partial(
+    assert_consistent_text_and_html,
+    section_headers=[
+        "Dimensions",
+        "Coordinates",
+        "Data variables",
+        "Indexes",
+        "Attributes",
+    ],
+)
+
+
+assert_consistent_text_and_html_datatree = partial(
+    assert_consistent_text_and_html,
+    section_headers=[
+        "Dimensions",
+        "Coordinates",
+        "Inherited coordinates",
+        "Data variables",
+        "Indexes",
+        "Attributes",
+    ],
+)
+
+
 @pytest.fixture
 def dataarray() -> xr.DataArray:
     return xr.DataArray(np.random.default_rng(0).random((4, 6)))
@@ -43,7 +106,7 @@ def dataset() -> xr.Dataset:
             "tmin": (("time", "location"), tmin_values),
             "tmax": (("time", "location"), tmax_values),
         },
-        {"time": times, "location": ["<IA>", "IN", "IL"]},
+        {"location": ["<IA>", "IN", "IL"], "time": times},
         attrs={"description": "Test data."},
     )
 
@@ -99,56 +162,91 @@ def test_summarize_attrs_with_unsafe_att
     assert "<dd>&lt;pd.DataFrame&gt;</dd>" in formatted
 
 
-def test_repr_of_dataarray(dataarray: xr.DataArray) -> None:
-    formatted = fh.array_repr(dataarray)
+def test_repr_of_dataarray() -> None:
+    dataarray = xr.DataArray(np.random.default_rng(0).random((4, 6)))
+    formatted = xarray_html_only_repr(dataarray)
     assert "dim_0" in formatted
     # has an expanded data section
     assert formatted.count("class='xr-array-in' type='checkbox' checked>") == 1
-    # coords, indexes and attrs don't have an items so they'll be be disabled and collapsed
-    assert (
-        formatted.count("class='xr-section-summary-in' type='checkbox' disabled >") == 3
-    )
+    # coords, indexes and attrs don't have any items, so they'll be omitted
+    assert "Coordinates" not in formatted
+    assert "Indexes" not in formatted
+    assert "Attributes" not in formatted
+
+    assert_consistent_text_and_html_dataarray(dataarray)
 
     with xr.set_options(display_expand_data=False):
-        formatted = fh.array_repr(dataarray)
+        formatted = xarray_html_only_repr(dataarray)
         assert "dim_0" in formatted
         # has a collapsed data section
         assert formatted.count("class='xr-array-in' type='checkbox' checked>") == 0
-        # coords, indexes and attrs don't have an items so they'll be be disabled and collapsed
-        assert (
-            formatted.count("class='xr-section-summary-in' type='checkbox' disabled >")
-            == 3
-        )
+        # coords, indexes and attrs don't have any items, so they'll be omitted
+        assert "Coordinates" not in formatted
+        assert "Indexes" not in formatted
+        assert "Attributes" not in formatted
+
+
+def test_repr_coords_order_of_datarray() -> None:
+    da1 = xr.DataArray(
+        np.empty((2, 2)),
+        coords={"foo": [0, 1], "bar": [0, 1]},
+        dims=["foo", "bar"],
+    )
+    da2 = xr.DataArray(
+        np.empty((2, 2)),
+        coords={"bar": [0, 1], "foo": [0, 1]},
+        dims=["bar", "foo"],
+    )
+    ds = xr.Dataset({"da1": da1, "da2": da2})
+
+    bar_line = (
+        "<span class='xr-has-index'>bar</span></div><div class='xr-var-dims'>(bar)"
+    )
+    foo_line = (
+        "<span class='xr-has-index'>foo</span></div><div class='xr-var-dims'>(foo)"
+    )
+
+    formatted_da1 = fh.array_repr(ds.da1)
+    assert formatted_da1.index(foo_line) < formatted_da1.index(bar_line)
+
+    formatted_da2 = fh.array_repr(ds.da2)
+    assert formatted_da2.index(bar_line) < formatted_da2.index(foo_line)
 
 
 def test_repr_of_multiindex(multiindex: xr.Dataset) -> None:
     formatted = fh.dataset_repr(multiindex)
     assert "(x)" in formatted
 
+    assert_consistent_text_and_html_dataset(multiindex)
+
 
 def test_repr_of_dataset(dataset: xr.Dataset) -> None:
-    formatted = fh.dataset_repr(dataset)
+    formatted = xarray_html_only_repr(dataset)
     # coords, attrs, and data_vars are expanded
     assert (
         formatted.count("class='xr-section-summary-in' type='checkbox'  checked>") == 3
     )
-    # indexes is collapsed
-    assert formatted.count("class='xr-section-summary-in' type='checkbox'  >") == 1
+    # indexes is omitted
+    assert "Indexes" not in formatted
     assert "&lt;U4" in formatted or "&gt;U4" in formatted
     assert "&lt;IA&gt;" in formatted
 
+    assert_consistent_text_and_html_dataset(dataset)
+
     with xr.set_options(
         display_expand_coords=False,
         display_expand_data_vars=False,
         display_expand_attrs=False,
         display_expand_indexes=True,
+        display_default_indexes=True,
     ):
-        formatted = fh.dataset_repr(dataset)
-        # coords, attrs, and data_vars are collapsed, indexes is expanded
+        formatted = xarray_html_only_repr(dataset)
+        # coords, attrs, and data_vars are collapsed, indexes is shown & expanded
         assert (
             formatted.count("class='xr-section-summary-in' type='checkbox'  checked>")
             == 1
         )
+        assert "Indexes" in formatted
         assert "&lt;U4" in formatted or "&gt;U4" in formatted
         assert "&lt;IA&gt;" in formatted
 
@@ -160,6 +258,17 @@ def test_repr_text_fallback(dataset: xr.
     assert "<pre class='xr-text-repr-fallback'>" in formatted
 
 
+def test_repr_coords_order_of_dataset() -> None:
+    ds = xr.Dataset()
+    ds.coords["as"] = 10
+    ds["var"] = xr.DataArray(np.ones((10,)), dims="x", coords={"x": np.arange(10)})
+    formatted = fh.dataset_repr(ds)
+
+    x_line = "<span class='xr-has-index'>x</span></div><div class='xr-var-dims'>(x)"
+    as_line = "<span>as</span></div><div class='xr-var-dims'>()"
+    assert formatted.index(x_line) < formatted.index(as_line)
+
+
 def test_variable_repr_html() -> None:
     v = xr.Variable(["time", "x"], [[1, 2, 3], [4, 5, 6]], {"foo": "bar"})
     assert hasattr(v, "_repr_html_")
@@ -198,214 +307,71 @@ def test_nonstr_variable_repr_html() ->
     assert "<li><span>10</span>: 3</li></ul>" in html
 
 
-@pytest.fixture(scope="module", params=["some html", "some other html"])
-def repr(request):
-    return request.param
-
-
-class Test_summarize_datatree_children:
-    """
-    Unit tests for summarize_datatree_children.
-    """
-
-    func = staticmethod(fh.summarize_datatree_children)
-
-    @pytest.fixture(scope="class")
-    def childfree_tree_factory(self):
-        """
-        Fixture for a child-free DataTree factory.
-        """
-        from random import randint
-
-        def _childfree_tree_factory():
-            return xr.DataTree(
-                dataset=xr.Dataset({"z": ("y", [randint(1, 100) for _ in range(3)])})
-            )
-
-        return _childfree_tree_factory
-
-    @pytest.fixture(scope="class")
-    def childfree_tree(self, childfree_tree_factory):
-        """
-        Fixture for a child-free DataTree.
-        """
-        return childfree_tree_factory()
-
-    @pytest.fixture(scope="function")
-    def mock_datatree_node_repr(self, monkeypatch):
-        """
-        Apply mocking for datatree_node_repr.
-        """
-
-        def mock(group_title, dt):
-            """
-            Mock with a simple result
-            """
-            return group_title + " " + str(id(dt))
-
-        monkeypatch.setattr(fh, "datatree_node_repr", mock)
-
-    @pytest.fixture(scope="function")
-    def mock_wrap_datatree_repr(self, monkeypatch):
-        """
-        Apply mocking for _wrap_datatree_repr.
-        """
-
-        def mock(r, *, end, **kwargs):
-            """
-            Mock by appending "end" or "not end".
-            """
-            return r + " " + ("end" if end else "not end") + "//"
-
-        monkeypatch.setattr(fh, "_wrap_datatree_repr", mock)
-
-    def test_empty_mapping(self):
-        """
-        Test with an empty mapping of children.
-        """
-        children: dict[str, xr.DataTree] = {}
-        assert self.func(children) == (
-            "<div style='display: inline-grid; grid-template-columns: 100%; grid-column: 1 / -1'>"
-            "</div>"
-        )
-
-    def test_one_child(
-        self, childfree_tree, mock_wrap_datatree_repr, mock_datatree_node_repr
-    ):
-        """
-        Test with one child.
-
-        Uses a mock of _wrap_datatree_repr and _datatree_node_repr to essentially mock
-        the inline lambda function "lines_callback".
-        """
-        # Create mapping of children
-        children = {"a": childfree_tree}
-
-        # Expect first line to be produced from the first child, and
-        # wrapped as the last child
-        first_line = f"a {id(children['a'])} end//"
-
-        assert self.func(children) == (
-            "<div style='display: inline-grid; grid-template-columns: 100%; grid-column: 1 / -1'>"
-            f"{first_line}"
-            "</div>"
-        )
+class TestDataTreeTruncatesNodes:
+    def test_many_nodes(self) -> None:
+        # construct a datatree with 500 nodes
+        number_of_files = 20
+        number_of_groups = 25
+        tree_dict = {}
+        for f in range(number_of_files):
+            for g in range(number_of_groups):
+                tree_dict[f"file_{f}/group_{g}"] = xr.Dataset({"g": f * g})
 
-    def test_two_children(
-        self, childfree_tree_factory, mock_wrap_datatree_repr, mock_datatree_node_repr
-    ):
-        """
-        Test with two level deep children.
+        tree = xr.DataTree.from_dict(tree_dict)
+        with xr.set_options(display_style="html"):
+            result = tree._repr_html_()
 
-        Uses a mock of _wrap_datatree_repr and datatree_node_repr to essentially mock
-        the inline lambda function "lines_callback".
-        """
-
-        # Create mapping of children
-        children = {"a": childfree_tree_factory(), "b": childfree_tree_factory()}
-
-        # Expect first line to be produced from the first child, and
-        # wrapped as _not_ the last child
-        first_line = f"a {id(children['a'])} not end//"
-
-        # Expect second line to be produced from the second child, and
-        # wrapped as the last child
-        second_line = f"b {id(children['b'])} end//"
-
-        assert self.func(children) == (
-            "<div style='display: inline-grid; grid-template-columns: 100%; grid-column: 1 / -1'>"
-            f"{first_line}"
-            f"{second_line}"
-            "</div>"
-        )
+        assert "6/20" in result
+        for i in range(number_of_files):
+            if i < 3 or i >= (number_of_files - 3):
+                assert f"file_{i}</div>" in result
+            else:
+                assert f"file_{i}</div>" not in result
+
+        assert "6/25" in result
+        for i in range(number_of_groups):
+            if i < 3 or i >= (number_of_groups - 3):
+                assert f"group_{i}</div>" in result
+            else:
+                assert f"group_{i}</div>" not in result
+
+        with xr.set_options(display_style="html", display_max_children=3):
+            result = tree._repr_html_()
+
+        assert "3/20" in result
+        for i in range(number_of_files):
+            if i < 2 or i >= (number_of_files - 1):
+                assert f"file_{i}</div>" in result
+            else:
+                assert f"file_{i}</div>" not in result
+
+        assert "3/25" in result
+        for i in range(number_of_groups):
+            if i < 2 or i >= (number_of_groups - 1):
+                assert f"group_{i}</div>" in result
+            else:
+                assert f"group_{i}</div>" not in result
 
 
 class TestDataTreeInheritance:
     def test_inherited_section_present(self) -> None:
-        dt = xr.DataTree.from_dict(
-            {
-                "/": None,
-                "a": None,
-            }
-        )
-        with xr.set_options(display_style="html"):
-            html = dt._repr_html_().strip()
-        # checks that the section appears somewhere
-        assert "Inherited coordinates" in html
-
-        # TODO how can we assert that the Inherited coordinates section does not appear in the child group?
-        # with xr.set_options(display_style="html"):
-        #     child_html = dt["a"]._repr_html_().strip()
-        # assert "Inherited coordinates" not in child_html
-
-
-class Test__wrap_datatree_repr:
-    """
-    Unit tests for _wrap_datatree_repr.
-    """
-
-    func = staticmethod(fh._wrap_datatree_repr)
-
-    def test_end(self, repr):
-        """
-        Test with end=True.
-        """
-        r = self.func(repr, end=True)
-        assert r == (
-            "<div style='display: inline-grid; grid-template-columns: 0px 20px auto; width: 100%;'>"
-            "<div style='"
-            "grid-column-start: 1;"
-            "border-right: 0.2em solid;"
-            "border-color: var(--xr-border-color);"
-            "height: 1.2em;"
-            "width: 0px;"
-            "'>"
-            "</div>"
-            "<div style='"
-            "grid-column-start: 2;"
-            "grid-row-start: 1;"
-            "height: 1em;"
-            "width: 20px;"
-            "border-bottom: 0.2em solid;"
-            "border-color: var(--xr-border-color);"
-            "'>"
-            "</div>"
-            "<div style='"
-            "grid-column-start: 3;"
-            "'>"
-            f"{repr}"
-            "</div>"
-            "</div>"
-        )
+        dt = xr.DataTree.from_dict(data={"a/b/c": None}, coords={"x": [1]})
 
-    def test_not_end(self, repr):
-        """
-        Test with end=False.
-        """
-        r = self.func(repr, end=False)
-        assert r == (
-            "<div style='display: inline-grid; grid-template-columns: 0px 20px auto; width: 100%;'>"
-            "<div style='"
-            "grid-column-start: 1;"
-            "border-right: 0.2em solid;"
-            "border-color: var(--xr-border-color);"
-            "height: 100%;"
-            "width: 0px;"
-            "'>"
-            "</div>"
-            "<div style='"
-            "grid-column-start: 2;"
-            "grid-row-start: 1;"
-            "height: 1em;"
-            "width: 20px;"
-            "border-bottom: 0.2em solid;"
-            "border-color: var(--xr-border-color);"
-            "'>"
-            "</div>"
-            "<div style='"
-            "grid-column-start: 3;"
-            "'>"
-            f"{repr}"
-            "</div>"
-            "</div>"
-        )
+        root_html = dt._repr_html_()
+        assert "Inherited coordinates" not in root_html
+
+        child_html = xarray_html_only_repr(dt["a"])
+        assert child_html.count("Inherited coordinates") == 1
+
+    def test_repr_consistency(self) -> None:
+        dt = xr.DataTree.from_dict({"/a/b/c": None})
+        assert_consistent_text_and_html_datatree(dt)
+        assert_consistent_text_and_html_datatree(dt["a"])
+        assert_consistent_text_and_html_datatree(dt["a/b"])
+        assert_consistent_text_and_html_datatree(dt["a/b/c"])
+
+    def test_no_repeated_style_or_fallback_text(self) -> None:
+        dt = xr.DataTree.from_dict({"/a/b/c": None})
+        html = dt._repr_html_()
+        assert html.count("<style>") == 1
+        assert html.count("<pre class='xr-text-repr-fallback'>") == 1
diff -pruN 2025.03.1-8/xarray/tests/test_groupby.py 2025.10.1-1/xarray/tests/test_groupby.py
--- 2025.03.1-8/xarray/tests/test_groupby.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_groupby.py	2025-10-10 10:38:05.000000000 +0000
@@ -4,7 +4,7 @@ import datetime
 import operator
 import warnings
 from itertools import pairwise
-from typing import Literal
+from typing import Literal, cast
 from unittest import mock
 
 import numpy as np
@@ -13,19 +13,23 @@ import pytest
 from packaging.version import Version
 
 import xarray as xr
-from xarray import DataArray, Dataset, Variable
+from xarray import DataArray, Dataset, Variable, date_range
 from xarray.core.groupby import _consolidate_slices
 from xarray.core.types import InterpOptions, ResampleCompatible
 from xarray.groupers import (
     BinGrouper,
     EncodedGroups,
     Grouper,
+    SeasonGrouper,
+    SeasonResampler,
     TimeResampler,
     UniqueGrouper,
+    season_to_month_tuple,
 )
 from xarray.namedarray.pycompat import is_chunked_array
 from xarray.structure.alignment import broadcast
 from xarray.tests import (
+    _ALL_CALENDARS,
     InaccessibleArray,
     assert_allclose,
     assert_equal,
@@ -561,6 +565,9 @@ def test_ds_groupby_quantile() -> None:
     assert_identical(expected, actual)
 
 
+@pytest.mark.filterwarnings(
+    "default:The `interpolation` argument to quantile was renamed to `method`:FutureWarning"
+)
 @pytest.mark.parametrize("as_dataset", [False, True])
 def test_groupby_quantile_interpolation_deprecated(as_dataset: bool) -> None:
     array = xr.DataArray(data=[1, 2, 3, 4], coords={"x": [1, 1, 2, 2]}, dims="x")
@@ -615,7 +622,7 @@ def test_groupby_repr(obj, dim) -> None:
     N = len(np.unique(obj[dim]))
     expected = f"<{obj.__class__.__name__}GroupBy"
     expected += f", grouped over 1 grouper(s), {N} groups in total:"
-    expected += f"\n    {dim!r}: {N}/{N} groups present with labels "
+    expected += f"\n    {dim!r}: UniqueGrouper({dim!r}), {N}/{N} groups with labels "
     if dim == "x":
         expected += "1, 2, 3, 4, 5>"
     elif dim == "y":
@@ -632,7 +639,7 @@ def test_groupby_repr_datetime(obj) -> N
     actual = repr(obj.groupby("t.month"))
     expected = f"<{obj.__class__.__name__}GroupBy"
     expected += ", grouped over 1 grouper(s), 12 groups in total:\n"
-    expected += "    'month': 12/12 groups present with labels "
+    expected += "    'month': UniqueGrouper('month'), 12/12 groups with labels "
     expected += "1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12>"
     assert actual == expected
 
@@ -753,6 +760,12 @@ def test_groupby_grouping_errors() -> No
     with pytest.raises(ValueError, match=r"Failed to group data."):
         dataset.to_dataarray().groupby(dataset.foo * np.nan)
 
+    with pytest.raises(TypeError, match=r"Cannot group by a Grouper object"):
+        dataset.groupby(UniqueGrouper(labels=[1, 2, 3]))  # type: ignore[arg-type]
+
+    with pytest.raises(TypeError, match=r"got multiple values for argument"):
+        UniqueGrouper(dataset.x, labels=[1, 2, 3])  # type: ignore[misc]
+
 
 def test_groupby_reduce_dimension_error(array) -> None:
     grouped = array.groupby("y")
@@ -815,7 +828,7 @@ def test_groupby_getitem(dataset) -> Non
     assert_identical(dataset.cat.sel(y=[1]), dataset.cat.groupby("y")[1])
 
     with pytest.raises(
-        NotImplementedError, match="Cannot broadcast 1d-only pandas categorical array."
+        NotImplementedError, match=r"Cannot broadcast 1d-only pandas extension array."
     ):
         dataset.groupby("boo")
     dataset = dataset.drop_vars(["cat"])
@@ -1038,10 +1051,12 @@ def test_groupby_math_bitshift() -> None
     assert_equal(right_expected, right_actual)
 
 
+@pytest.mark.parametrize(
+    "x_bins", ((0, 2, 4, 6), pd.IntervalIndex.from_breaks((0, 2, 4, 6), closed="left"))
+)
 @pytest.mark.parametrize("use_flox", [True, False])
-def test_groupby_bins_cut_kwargs(use_flox: bool) -> None:
+def test_groupby_bins_cut_kwargs(use_flox: bool, x_bins) -> None:
     da = xr.DataArray(np.arange(12).reshape(6, 2), dims=("x", "y"))
-    x_bins = (0, 2, 4, 6)
 
     with xr.set_options(use_flox=use_flox):
         actual = da.groupby_bins(
@@ -1051,7 +1066,12 @@ def test_groupby_bins_cut_kwargs(use_flo
         np.array([[1.0, 2.0], [5.0, 6.0], [9.0, 10.0]]),
         dims=("x_bins", "y"),
         coords={
-            "x_bins": ("x_bins", pd.IntervalIndex.from_breaks(x_bins, closed="left"))
+            "x_bins": (
+                "x_bins",
+                x_bins
+                if isinstance(x_bins, pd.IntervalIndex)
+                else pd.IntervalIndex.from_breaks(x_bins, closed="left"),
+            )
         },
     )
     assert_identical(expected, actual)
@@ -1062,6 +1082,11 @@ def test_groupby_bins_cut_kwargs(use_flo
         ).mean()
     assert_identical(expected, actual)
 
+    with xr.set_options(use_flox=use_flox):
+        labels = ["one", "two", "three"]
+        actual = da.groupby(x=BinGrouper(bins=x_bins, labels=labels)).sum()
+        assert actual.xindexes["x_bins"].index.equals(pd.Index(labels))  # type: ignore[attr-defined]
+
 
 @pytest.mark.parametrize("indexed_coord", [True, False])
 @pytest.mark.parametrize(
@@ -1118,7 +1143,8 @@ def test_groupby_math_nD_group() -> None
     expected = da.isel(x=slice(30)) - expanded_mean
     expected["labels"] = expected.labels.broadcast_like(expected.labels2d)
     expected["num"] = expected.num.broadcast_like(expected.num2d)
-    expected["num2d_bins"] = (("x", "y"), mean.num2d_bins.data[idxr])
+    # mean.num2d_bins.data is a pandas IntervalArray, so convert it to a numpy array to allow fancy indexing
+    expected["num2d_bins"] = (("x", "y"), mean.num2d_bins.data.to_numpy()[idxr])
     actual = g - mean
     assert_identical(expected, actual)
 
@@ -1298,8 +1324,7 @@ class TestDataArrayGroupBy:
         grouped = self.da.groupby("abc")
         expected_groups = {"a": range(9), "c": [9], "b": range(10, 20)}
         assert expected_groups.keys() == grouped.groups.keys()
-        for key in expected_groups:
-            expected_group = expected_groups[key]
+        for key, expected_group in expected_groups.items():
             actual_group = grouped.groups[key]
 
             # TODO: array_api doesn't allow slice:
@@ -1639,6 +1664,19 @@ class TestDataArrayGroupBy:
             actual_sum = array.groupby(dim).sum(...)
             assert_identical(expected_sum, actual_sum)
 
+        if has_flox:
+            # GH9803
+            # reduce over one dim of a nD grouper
+            array.coords["labels"] = (("ny", "nx"), np.array([["a", "b"], ["b", "a"]]))
+            actual = array.groupby("labels").sum("nx")
+            expected_np = np.array([[[0, 1], [3, 2]], [[5, 10], [20, 15]]])
+            expected = xr.DataArray(
+                expected_np,
+                dims=("time", "ny", "labels"),
+                coords={"labels": ["a", "b"]},
+            )
+            assert_identical(expected, actual)
+
     def test_groupby_multidim_map(self) -> None:
         array = self.make_groupby_multidim_example_array()
         actual = array.groupby("lon").map(lambda x: x - x.mean())
@@ -1680,13 +1718,9 @@ class TestDataArrayGroupBy:
         df["dim_0_bins"] = pd.cut(array["dim_0"], bins, **cut_kwargs)  # type: ignore[call-overload]
 
         expected_df = df.groupby("dim_0_bins", observed=True).sum()
-        # TODO: can't convert df with IntervalIndex to Xarray
-        expected = (
-            expected_df.reset_index(drop=True)
-            .to_xarray()
-            .assign_coords(index=np.array(expected_df.index))
-            .rename({"index": "dim_0_bins"})["a"]
-        )
+        expected = expected_df.to_xarray().assign_coords(
+            dim_0_bins=cast(pd.CategoricalIndex, expected_df.index).categories
+        )["a"]
 
         with xr.set_options(use_flox=use_flox):
             gb = array.groupby_bins("dim_0", bins=bins, **cut_kwargs)
@@ -2188,7 +2222,7 @@ class TestDataArrayResample:
             f = interp1d(
                 np.arange(len(times)),
                 data,
-                kind=kwargs["order"] if kind == "polynomial" else kind,
+                kind=kwargs["order"] if kind == "polynomial" else kind,  # type: ignore[arg-type,unused-ignore]
                 axis=-1,
                 bounds_error=True,
                 assume_sorted=True,
@@ -2266,7 +2300,7 @@ class TestDataArrayResample:
             f = interp1d(
                 np.arange(len(times)),
                 data,
-                kind=kwargs["order"] if kind == "polynomial" else kind,
+                kind=kwargs["order"] if kind == "polynomial" else kind,  # type: ignore[arg-type,unused-ignore]
                 axis=-1,
                 bounds_error=True,
                 assume_sorted=True,
@@ -2407,6 +2441,7 @@ class TestDatasetResample:
                 for i in range(3)
             ],
             dim=actual["time"],
+            data_vars="all",
         )
         assert_allclose(expected, actual)
 
@@ -2914,33 +2949,21 @@ def test_multiple_groupers(use_flox: boo
 
     if has_dask:
         b["xy"] = b["xy"].chunk()
-        for eagerly_compute_group in [True, False]:
-            kwargs = dict(
-                x=UniqueGrouper(),
-                xy=UniqueGrouper(labels=["a", "b", "c"]),
-                eagerly_compute_group=eagerly_compute_group,
-            )
-            expected = xr.DataArray(
-                [[[1, 1, 1], [np.nan, 1, 2]]] * 4,
-                dims=("z", "x", "xy"),
-                coords={"xy": ("xy", ["a", "b", "c"], {"foo": "bar"})},
-            )
-            if eagerly_compute_group:
-                with raise_if_dask_computes(max_computes=1):
-                    with pytest.warns(DeprecationWarning):
-                        gb = b.groupby(**kwargs)  # type: ignore[arg-type]
-                    assert_identical(gb.count(), expected)
-            else:
-                with raise_if_dask_computes(max_computes=0):
-                    gb = b.groupby(**kwargs)  # type: ignore[arg-type]
-                assert is_chunked_array(gb.encoded.codes.data)
-                assert not gb.encoded.group_indices
-                if has_flox:
-                    with raise_if_dask_computes(max_computes=1):
-                        assert_identical(gb.count(), expected)
-                else:
-                    with pytest.raises(ValueError, match="when lazily grouping"):
-                        gb.count()
+        expected = xr.DataArray(
+            [[[1, 1, 1], [np.nan, 1, 2]]] * 4,
+            dims=("z", "x", "xy"),
+            coords={"xy": ("xy", ["a", "b", "c"], {"foo": "bar"})},
+        )
+        with raise_if_dask_computes(max_computes=0):
+            gb = b.groupby(x=UniqueGrouper(), xy=UniqueGrouper(labels=["a", "b", "c"]))
+        assert is_chunked_array(gb.encoded.codes.data)
+        assert not gb.encoded.group_indices
+        if has_flox:
+            with raise_if_dask_computes(max_computes=1):
+                assert_identical(gb.count(), expected)
+        else:
+            with pytest.raises(ValueError, match="when lazily grouping"):
+                gb.count()
 
 
 @pytest.mark.parametrize("use_flox", [True, False])
@@ -3101,9 +3124,7 @@ def test_lazy_grouping(grouper, expect_i
 
     if has_flox:
         lazy = (
-            xr.Dataset({"foo": data}, coords={"zoo": data})
-            .groupby(zoo=grouper, eagerly_compute_group=False)
-            .count()
+            xr.Dataset({"foo": data}, coords={"zoo": data}).groupby(zoo=grouper).count()
         )
         assert_identical(eager, lazy)
 
@@ -3119,9 +3140,7 @@ def test_lazy_grouping_errors() -> None:
         coords={"y": ("x", dask.array.arange(20, chunks=3))},
     )
 
-    gb = data.groupby(
-        y=UniqueGrouper(labels=np.arange(5, 10)), eagerly_compute_group=False
-    )
+    gb = data.groupby(y=UniqueGrouper(labels=np.arange(5, 10)))
     message = "not supported when lazily grouping by"
     with pytest.raises(ValueError, match=message):
         gb.map(lambda x: x)
@@ -3181,6 +3200,16 @@ def test_multiple_grouper_unsorted_order
     assert_identical(actual2, expected2)
 
 
+def test_multiple_grouper_empty_groups() -> None:
+    ds = xr.Dataset(
+        {"foo": (("x", "y"), np.random.rand(4, 3))},
+        coords={"x": [10, 20, 30, 40], "letters": ("x", list("abba"))},
+    )
+
+    groups = ds.groupby(x=BinGrouper(bins=[5, 15, 25]), letters=UniqueGrouper())
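+    # only 2 of the 4 possible (bin, letter) combinations contain data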
+    assert len(groups.groups) == 2
+
+
 def test_groupby_multiple_bin_grouper_missing_groups() -> None:
     from numpy import nan
 
@@ -3220,7 +3249,7 @@ def test_shuffle_simple() -> None:
     da = xr.DataArray(
         dims="x",
         data=dask.array.from_array([1, 2, 3, 4, 5, 6], chunks=2),
-        coords={"label": ("x", "a b c a b c".split(" "))},
+        coords={"label": ("x", ["a", "b", "c", "a", "b", "c"])},
     )
     actual = da.groupby(label=UniqueGrouper()).shuffle_to_chunks()
     expected = da.isel(x=[0, 3, 1, 4, 2, 5])
@@ -3241,8 +3270,6 @@ def test_shuffle_simple() -> None:
 def test_shuffle_by(chunks, expected_chunks):
     import dask.array
 
-    from xarray.groupers import UniqueGrouper
-
     da = xr.DataArray(
         dims="x",
         data=dask.array.arange(10, chunks=chunks),
@@ -3264,32 +3291,557 @@ def test_groupby_dask_eager_load_warning
         coords={"x": ("z", np.arange(12)), "y": ("z", np.arange(12))},
     ).chunk(z=6)
 
-    with pytest.warns(DeprecationWarning):
-        ds.groupby(x=UniqueGrouper())
-
-    with pytest.warns(DeprecationWarning):
-        ds.groupby("x")
-
-    with pytest.warns(DeprecationWarning):
-        ds.groupby(ds.x)
-
     with pytest.raises(ValueError, match="Please pass"):
-        ds.groupby("x", eagerly_compute_group=False)
+        with pytest.warns(DeprecationWarning):
+            ds.groupby("x", eagerly_compute_group=False)
+    with pytest.raises(ValueError, match="Eagerly computing"):
+        ds.groupby("x", eagerly_compute_group=True)  # type: ignore[arg-type]
 
     # This is technically fine but anyone iterating over the groupby object
     # will see an error, so let's warn and have them opt-in.
-    with pytest.warns(DeprecationWarning):
-        ds.groupby(x=UniqueGrouper(labels=[1, 2, 3]))
-
-    ds.groupby(x=UniqueGrouper(labels=[1, 2, 3]), eagerly_compute_group=False)
+    ds.groupby(x=UniqueGrouper(labels=[1, 2, 3]))
 
     with pytest.warns(DeprecationWarning):
-        ds.groupby_bins("x", bins=3)
+        ds.groupby(x=UniqueGrouper(labels=[1, 2, 3]), eagerly_compute_group=False)
+
     with pytest.raises(ValueError, match="Please pass"):
-        ds.groupby_bins("x", bins=3, eagerly_compute_group=False)
+        with pytest.warns(DeprecationWarning):
+            ds.groupby_bins("x", bins=3, eagerly_compute_group=False)
+    with pytest.raises(ValueError, match="Eagerly computing"):
+        ds.groupby_bins("x", bins=3, eagerly_compute_group=True)  # type: ignore[arg-type]
+    ds.groupby_bins("x", bins=[1, 2, 3])
     with pytest.warns(DeprecationWarning):
-        ds.groupby_bins("x", bins=[1, 2, 3])
-    ds.groupby_bins("x", bins=[1, 2, 3], eagerly_compute_group=False)
+        ds.groupby_bins("x", bins=[1, 2, 3], eagerly_compute_group=False)
+
+
+class TestSeasonGrouperAndResampler:
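+    """Tests for SeasonGrouper, SeasonResampler and season_to_month_tuple."""
+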
+    def test_season_to_month_tuple(self):
+        assert season_to_month_tuple(["JF", "MAM", "JJAS", "OND"]) == (
+            (1, 2),
+            (3, 4, 5),
+            (6, 7, 8, 9),
+            (10, 11, 12),
+        )
+        assert season_to_month_tuple(["DJFM", "AM", "JJAS", "ON"]) == (
+            (12, 1, 2, 3),
+            (4, 5),
+            (6, 7, 8, 9),
+            (10, 11),
+        )
+
+    def test_season_grouper_raises_error_if_months_are_not_valid_or_not_continuous(
+        self,
+    ):
+        calendar = "standard"
+        time = date_range("2001-01-01", "2002-12-30", freq="D", calendar=calendar)
+        da = DataArray(np.ones(time.size), dims="time", coords={"time": time})
+
+        with pytest.raises(KeyError, match="IN"):
+            da.groupby(time=SeasonGrouper(["INVALID_SEASON"]))
+
+        with pytest.raises(KeyError, match="MD"):
+            da.groupby(time=SeasonGrouper(["MDF"]))
+
+    @pytest.mark.parametrize("calendar", _ALL_CALENDARS)
+    def test_season_grouper_with_months_spanning_calendar_year_using_same_year(
+        self, calendar
+    ):
+        time = date_range("2001-01-01", "2002-12-30", freq="MS", calendar=calendar)
+        # fmt: off
+        data = np.array(
+            [
+                1.0, 1.25, 1.5, 1.75, 2.0, 1.1, 1.35, 1.6, 1.85, 1.2, 1.45, 1.7,
+                1.95, 1.05, 1.3, 1.55, 1.8, 1.15, 1.4, 1.65, 1.9, 1.25, 1.5, 1.75,
+            ]
+
+        )
+        # fmt: on
+        da = DataArray(data, dims="time", coords={"time": time})
+        da["year"] = da.time.dt.year
+
+        actual = da.groupby(
+            year=UniqueGrouper(), time=SeasonGrouper(["NDJFM", "AMJ"])
+        ).mean()
+
+        # Expected result when the "ND" months are grouped with the same calendar year
+        expected = xr.DataArray(
+            data=np.array([[1.38, 1.616667], [1.51, 1.5]]),
+            dims=["year", "season"],
+            coords={"year": [2001, 2002], "season": ["NDJFM", "AMJ"]},
+        )
+
+        assert_allclose(expected, actual)
+
+    @pytest.mark.parametrize("calendar", _ALL_CALENDARS)
+    def test_season_grouper_with_partial_years(self, calendar):
+        time = date_range("2001-01-01", "2002-06-30", freq="MS", calendar=calendar)
+        # fmt: off
+        data = np.array(
+            [
+                1.0, 1.25, 1.5, 1.75, 2.0, 1.1, 1.35, 1.6, 1.85, 1.2, 1.45, 1.7,
+                1.95, 1.05, 1.3, 1.55, 1.8, 1.15,
+            ]
+        )
+        # fmt: on
+        da = DataArray(data, dims="time", coords={"time": time})
+        da["year"] = da.time.dt.year
+
+        actual = da.groupby(
+            year=UniqueGrouper(), time=SeasonGrouper(["NDJFM", "AMJ"])
+        ).mean()
+
+        # Expected if partial years are handled correctly
+        expected = xr.DataArray(
+            data=np.array([[1.38, 1.616667], [1.43333333, 1.5]]),
+            dims=["year", "season"],
+            coords={"year": [2001, 2002], "season": ["NDJFM", "AMJ"]},
+        )
+
+        assert_allclose(expected, actual)
+
+    @pytest.mark.parametrize("calendar", ["standard"])
+    def test_season_grouper_with_single_month_seasons(self, calendar):
+        time = date_range("2001-01-01", "2002-12-30", freq="MS", calendar=calendar)
+        # fmt: off
+        data = np.array(
+            [
+                1.0, 1.25, 1.5, 1.75, 2.0, 1.1, 1.35, 1.6, 1.85, 1.2, 1.45, 1.7,
+                1.95, 1.05, 1.3, 1.55, 1.8, 1.15, 1.4, 1.65, 1.9, 1.25, 1.5, 1.75,
+            ]
+        )
+        # fmt: on
+        da = DataArray(data, dims="time", coords={"time": time})
+        da["year"] = da.time.dt.year
+
+        # TODO: Consider supporting this if needed
+        # It does not work without flox, because the group labels are not unique,
+        # and so the stack/unstack approach does not work.
+        with pytest.raises(ValueError):
+            da.groupby(
+                year=UniqueGrouper(),
+                time=SeasonGrouper(
+                    ["J", "F", "M", "A", "M", "J", "J", "A", "S", "O", "N", "D"]
+                ),
+            ).mean()
+
+        # Expected if single month seasons are handled correctly
+        # expected = xr.DataArray(
+        #     data=np.array(
+        #         [
+        #             [1.0, 1.25, 1.5, 1.75, 2.0, 1.1, 1.35, 1.6, 1.85, 1.2, 1.45, 1.7],
+        #             [1.95, 1.05, 1.3, 1.55, 1.8, 1.15, 1.4, 1.65, 1.9, 1.25, 1.5, 1.75],
+        #         ]
+        #     ),
+        #     dims=["year", "season"],
+        #     coords={
+        #         "year": [2001, 2002],
+        #         "season": ["J", "F", "M", "A", "M", "J", "J", "A", "S", "O", "N", "D"],
+        #     },
+        # )
+        # assert_allclose(expected, actual)
+
+    @pytest.mark.parametrize("calendar", _ALL_CALENDARS)
+    def test_season_grouper_with_months_spanning_calendar_year_using_previous_year(
+        self, calendar
+    ):
+        time = date_range("2001-01-01", "2002-12-30", freq="MS", calendar=calendar)
+        # fmt: off
+        data = np.array(
+            [
+                1.0, 1.25, 1.5, 1.75, 2.0, 1.1, 1.35, 1.6, 1.85, 1.2, 1.45, 1.7,
+                1.95, 1.05, 1.3, 1.55, 1.8, 1.15, 1.4, 1.65, 1.9, 1.25, 1.5, 1.75,
+            ]
+        )
+        # fmt: on
+        da = DataArray(data, dims="time", coords={"time": time})
+
+        gb = da.resample(time=SeasonResampler(["NDJFM", "AMJ"], drop_incomplete=False))
+        actual = gb.mean()
+
+        # fmt: off
+        new_time_da = xr.DataArray(
+            dims="time",
+            data=pd.DatetimeIndex(
+                [
+                    "2000-11-01", "2001-04-01", "2001-11-01", "2002-04-01", "2002-11-01"
+                ]
+            ),
+        )
+        # fmt: on
+        if calendar != "standard":
+            new_time_da = new_time_da.convert_calendar(
+                calendar=calendar, align_on="date"
+            )
+        new_time = new_time_da.time.variable
+
+        # Expected result when the previous year's "ND" months are used for seasonal grouping
+        expected = xr.DataArray(
+            data=np.array([1.25, 1.616667, 1.49, 1.5, 1.625]),
+            dims="time",
+            coords={"time": new_time},
+        )
+        assert_allclose(expected, actual)
+
+    @pytest.mark.parametrize("calendar", _ALL_CALENDARS)
+    def test_season_grouper_simple(self, calendar) -> None:
+        time = date_range("2001-01-01", "2002-12-30", freq="D", calendar=calendar)
+        da = DataArray(np.ones(time.size), dims="time", coords={"time": time})
+        expected = da.groupby("time.season").mean()
+        # note season order matches expected
+        actual = da.groupby(
+            time=SeasonGrouper(
+                ["DJF", "JJA", "MAM", "SON"],  # drop_incomplete=False
+            )
+        ).mean()
+        assert_identical(expected, actual)
+
+    @pytest.mark.parametrize("seasons", [["JJA", "MAM", "SON", "DJF"]])
+    def test_season_resampling_raises_unsorted_seasons(self, seasons):
+        calendar = "standard"
+        time = date_range("2001-01-01", "2002-12-30", freq="D", calendar=calendar)
+        da = DataArray(np.ones(time.size), dims="time", coords={"time": time})
+        with pytest.raises(ValueError, match="sort"):
+            da.resample(time=SeasonResampler(seasons))
+
+    @pytest.mark.parametrize(
+        "use_cftime", [pytest.param(True, marks=requires_cftime), False]
+    )
+    @pytest.mark.parametrize("drop_incomplete", [True, False])
+    @pytest.mark.parametrize(
+        "seasons",
+        [
+            pytest.param(["DJF", "MAM", "JJA", "SON"], id="standard"),
+            pytest.param(["NDJ", "FMA", "MJJ", "ASO"], id="nov-first"),
+            pytest.param(["MAM", "JJA", "SON", "DJF"], id="standard-diff-order"),
+            pytest.param(["JFM", "AMJ", "JAS", "OND"], id="december-same-year"),
+            pytest.param(["DJF", "MAM", "JJA", "ON"], id="skip-september"),
+            pytest.param(["JJAS"], id="jjas-only"),
+        ],
+    )
+    def test_season_resampler(
+        self, seasons: list[str], drop_incomplete: bool, use_cftime: bool
+    ) -> None:
+        calendar = "standard"
+        time = date_range(
+            "2001-01-01",
+            "2002-12-30",
+            freq="D",
+            calendar=calendar,
+            use_cftime=use_cftime,
+        )
+        da = DataArray(np.ones(time.size), dims="time", coords={"time": time})
+        counts = da.resample(time="ME").count()
+
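+        # Build the expected result by hand: for seasons that span December,
+        # shift the months following "D" back one year so each season shares
+        # December's year, relabel the monthly counts, then sum them per season.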
+        seasons_as_ints = season_to_month_tuple(seasons)
+        month = counts.time.dt.month.data
+        year = counts.time.dt.year.data
+        for season, as_ints in zip(seasons, seasons_as_ints, strict=True):
+            if "DJ" in season:
+                for imonth in as_ints[season.index("D") + 1 :]:
+                    year[month == imonth] -= 1
+        counts["time"] = (
+            "time",
+            [pd.Timestamp(f"{y}-{m}-01") for y, m in zip(year, month, strict=True)],
+        )
+        if has_cftime:
+            counts = counts.convert_calendar(calendar, "time", align_on="date")
+
+        expected_vals = []
+        expected_time = []
+        for year in [2001, 2002, 2003]:
+            for season, as_ints in zip(seasons, seasons_as_ints, strict=True):
+                out_year = year
+                if "DJ" in season:
+                    out_year = year - 1
+                if out_year == 2003:
+                    # this is a dummy year added to make sure we cover 2002-DJF
+                    continue
+                available = [
+                    counts.sel(time=f"{out_year}-{month:02d}").data for month in as_ints
+                ]
+                if any(len(a) == 0 for a in available) and drop_incomplete:
+                    continue
+                output_label = pd.Timestamp(f"{out_year}-{as_ints[0]:02d}-01")
+                expected_time.append(output_label)
+                # use concatenate to handle the empty array when the December value does not exist
+                expected_vals.append(np.concatenate(available).sum())
+
+        expected = (
+            # we construct expected in the standard calendar
+            xr.DataArray(expected_vals, dims="time", coords={"time": expected_time})
+        )
+        if has_cftime:
+            # and then convert to the expected calendar,
+            expected = expected.convert_calendar(
+                calendar, align_on="date", use_cftime=use_cftime
+            )
+        # and finally sort since DJF will be out-of-order
+        expected = expected.sortby("time")
+
+        rs = SeasonResampler(seasons, drop_incomplete=drop_incomplete)
+        # through resample
+        actual = da.resample(time=rs).sum()
+        assert_identical(actual, expected)
+
+    @requires_cftime
+    def test_season_resampler_errors(self):
+        time = date_range("2001-01-01", "2002-12-30", freq="D", calendar="360_day")
+        da = DataArray(np.ones(time.size), dims="time", coords={"time": time})
+
+        # non-datetime array
+        with pytest.raises(ValueError):
+            DataArray(np.ones(5), dims="time").groupby(time=SeasonResampler(["DJF"]))
+
+        # ndim > 1 array
+        with pytest.raises(ValueError):
+            DataArray(
+                np.ones((5, 5)), dims=("t", "x"), coords={"x": np.arange(5)}
+            ).groupby(x=SeasonResampler(["DJF"]))
+
+        # overlapping seasons
+        with pytest.raises(ValueError):
+            da.groupby(time=SeasonResampler(["DJFM", "MAMJ", "JJAS", "SOND"])).sum()
+
+    @requires_cftime
+    def test_season_resampler_groupby_identical(self):
+        time = date_range("2001-01-01", "2002-12-30", freq="D")
+        da = DataArray(np.ones(time.size), dims="time", coords={"time": time})
+
+        # through resample
+        resampler = SeasonResampler(["DJF", "MAM", "JJA", "SON"])
+        rs = da.resample(time=resampler).sum()
+
+        # through groupby
+        gb = da.groupby(time=resampler).sum()
+        assert_identical(rs, gb)
+
+
+@pytest.mark.parametrize(
+    "chunk",
+    [
+        pytest.param(
+            True, marks=pytest.mark.skipif(not has_dask, reason="requires dask")
+        ),
+        False,
+    ],
+)
+def test_datetime_mean(chunk, use_cftime):
+    ds = xr.Dataset(
+        {
+            "var1": (
+                ("time",),
+                xr.date_range(
+                    "2021-10-31", periods=10, freq="D", use_cftime=use_cftime
+                ),
+            ),
+            "var2": (("x",), list(range(10))),
+        }
+    )
+    if chunk:
+        ds = ds.chunk()
+    assert "var1" in ds.groupby("x").mean("time")
+    assert "var1" in ds.mean("x")
+
+
+def test_mean_with_mixed_types():
+    """Test that mean correctly handles datasets with mixed types including strings"""
+    ds = xr.Dataset(
+        {
+            "numbers": (("x",), [1.0, 2.0, 3.0, 4.0]),
+            "integers": (("x",), [10, 20, 30, 40]),
+            "strings": (("x",), ["a", "b", "c", "d"]),
+            "datetime": (
+                ("x",),
+                pd.date_range("2021-01-01", periods=4, freq="D"),
+            ),
+            "timedelta": (
+                ("x",),
+                pd.timedelta_range("1 day", periods=4, freq="D"),
+            ),
+        }
+    )
+
+    # Direct mean should exclude strings but include datetime/timedelta
+    result = ds.mean()
+    assert "numbers" in result.data_vars
+    assert "integers" in result.data_vars
+    assert "strings" not in result.data_vars
+    assert "datetime" in result.data_vars
+    assert "timedelta" in result.data_vars
+
+    # Also test mean with specific dimension
+    result_dim = ds.mean("x")
+    assert "numbers" in result_dim.data_vars
+    assert "integers" in result_dim.data_vars
+    assert "strings" not in result_dim.data_vars
+    assert "datetime" in result_dim.data_vars
+    assert "timedelta" in result_dim.data_vars
+
+
+def test_mean_with_string_coords():
+    """Test that mean works when strings are in coordinates, not data vars"""
+    ds = xr.Dataset(
+        {
+            "temperature": (("city", "time"), np.random.rand(3, 4)),
+            "humidity": (("city", "time"), np.random.rand(3, 4)),
+        },
+        coords={
+            "city": ["New York", "London", "Tokyo"],
+            "time": pd.date_range("2021-01-01", periods=4, freq="D"),
+        },
+    )
+
+    # Mean across string coordinate should work
+    result = ds.mean("city")
+    assert result.sizes == {"time": 4}
+    assert "temperature" in result.data_vars
+    assert "humidity" in result.data_vars
+
+    # Groupby with string coordinate should work
+    grouped = ds.groupby("city")
+    result_grouped = grouped.mean()
+    assert "temperature" in result_grouped.data_vars
+    assert "humidity" in result_grouped.data_vars
+
+
+def test_mean_datetime_edge_cases():
+    """Test mean with datetime edge cases like NaT"""
+    # Test with NaT values
+    dates_with_nat = pd.date_range("2021-01-01", periods=4, freq="D")
+    dates_with_nat_array = dates_with_nat.values.copy()
+    dates_with_nat_array[1] = np.datetime64("NaT")
+
+    ds = xr.Dataset(
+        {
+            "dates": (("x",), dates_with_nat_array),
+            "values": (("x",), [1.0, 2.0, 3.0, 4.0]),
+        }
+    )
+
+    # Mean should handle NaT properly (skipna behavior)
+    result = ds.mean()
+    assert "dates" in result.data_vars
+    assert "values" in result.data_vars
+    # The mean should skip NaT and compute mean of the other 3 dates
+    assert not result.dates.isnull().item()
+
+    # Test with timedelta
+    timedeltas = pd.timedelta_range("1 day", periods=4, freq="D")
+    ds_td = xr.Dataset(
+        {
+            "timedeltas": (("x",), timedeltas),
+            "values": (("x",), [1.0, 2.0, 3.0, 4.0]),
+        }
+    )
+
+    result_td = ds_td.mean()
+    assert "timedeltas" in result_td.data_vars
+    assert result_td["timedeltas"].values == np.timedelta64(
+        216000000000000, "ns"
+    )  # 2.5 days
+
+
+@requires_cftime
+def test_mean_with_cftime_objects():
+    """Test mean with cftime objects (issue #5897)"""
+    ds = xr.Dataset(
+        {
+            "var1": (
+                ("time",),
+                xr.date_range("2021-10-31", periods=10, freq="D", use_cftime=True),
+            ),
+            "var2": (("x",), list(range(10))),
+        }
+    )
+
+    # Test averaging over time dimension - var1 should be included
+    result_time = ds.mean("time")
+    assert "var1" in result_time.data_vars
+    assert "var2" not in result_time.dims
+
+    # Test averaging over x dimension - should work normally
+    result_x = ds.mean("x")
+    assert "var2" in result_x.data_vars
+    assert "var1" in result_x.data_vars
+    assert result_x.var2.item() == 4.5  # mean of 0-9
+
+    # Test that mean preserves object arrays containing datetime-like objects
+    import cftime
+
+    dates = np.array(
+        [cftime.DatetimeNoLeap(2021, i, 1) for i in range(1, 5)], dtype=object
+    )
+    ds2 = xr.Dataset(
+        {
+            "cftime_dates": (("x",), dates),
+            "numbers": (("x",), [1.0, 2.0, 3.0, 4.0]),
+            "object_strings": (("x",), np.array(["a", "b", "c", "d"], dtype=object)),
+        }
+    )
+
+    # Mean should include cftime dates but not string objects
+    result = ds2.mean()
+    assert "cftime_dates" in result.data_vars
+    assert "numbers" in result.data_vars
+    assert "object_strings" not in result.data_vars
+
+
+@requires_dask
+@requires_cftime
+def test_mean_with_cftime_objects_dask():
+    """Test mean with cftime objects using dask backend (issue #5897)"""
+    ds = xr.Dataset(
+        {
+            "var1": (
+                ("time",),
+                xr.date_range("2021-10-31", periods=10, freq="D", use_cftime=True),
+            ),
+            "var2": (("x",), list(range(10))),
+        }
+    )
+
+    # Test with dask backend
+    dsc = ds.chunk({})
+    result_time_dask = dsc.mean("time")
+    assert "var1" in result_time_dask.data_vars
+
+    result_x_dask = dsc.mean("x")
+    assert "var2" in result_x_dask.data_vars
+    assert result_x_dask.var2.compute().item() == 4.5
+
+
+def test_groupby_bins_datetime_mean():
+    """Test groupby_bins with datetime mean (issue #6995)"""
+    times = pd.date_range("2020-01-01", "2020-02-01", freq="1h")
+    index = np.arange(len(times))
+    bins = np.arange(0, len(index), 5)
+
+    ds = xr.Dataset(
+        {"time": ("index", times), "float": ("index", np.linspace(0, 1, len(index)))},
+        coords={"index": index},
+    )
+
+    # The time variable should be preserved and averaged
+    result = ds.groupby_bins("index", bins).mean()
+    assert "time" in result.data_vars
+    assert "float" in result.data_vars
+    assert result.time.dtype == np.dtype("datetime64[ns]")
+
+
+def test_groupby_bins_mean_time_series():
+    """Test groupby_bins mean on time series data (issue #10217)"""
+    ds = xr.Dataset(
+        {
+            "measurement": ("trial", np.arange(0, 100, 10)),
+            "time": ("trial", pd.date_range("20240101T1500", "20240101T1501", 10)),
+        }
+    )
+
+    # Time variable should be preserved in the aggregation
+    ds_agged = ds.groupby_bins("trial", 5).mean()
+    assert "time" in ds_agged.data_vars
+    assert "measurement" in ds_agged.data_vars
+    assert ds_agged.time.dtype == np.dtype("datetime64[ns]")
 
 
 # TODO: Possible property tests to add to this module
diff -pruN 2025.03.1-8/xarray/tests/test_indexes.py 2025.10.1-1/xarray/tests/test_indexes.py
--- 2025.03.1-8/xarray/tests/test_indexes.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_indexes.py	2025-10-10 10:38:05.000000000 +0000
@@ -346,7 +346,7 @@ class TestPandasMultiIndex:
             "bar": bar_data.dtype,
         }
 
-        with pytest.raises(ValueError, match=".*conflicting multi-index level name.*"):
+        with pytest.raises(ValueError, match=r".*conflicting multi-index level name.*"):
             PandasMultiIndex(pd_idx, "foo")
 
         # default level names
@@ -636,10 +636,10 @@ class TestIndexes:
         }
         assert indexes.get_all_coords("one") == expected
 
-        with pytest.raises(ValueError, match="errors must be.*"):
+        with pytest.raises(ValueError, match=r"errors must be.*"):
             indexes.get_all_coords("x", errors="invalid")
 
-        with pytest.raises(ValueError, match="no index found.*"):
+        with pytest.raises(ValueError, match=r"no index found.*"):
             indexes.get_all_coords("no_coord")
 
         assert indexes.get_all_coords("no_coord", errors="ignore") == {}
@@ -729,3 +729,54 @@ def test_restore_dtype_on_multiindexes(d
     foo = xr.Dataset(coords={"bar": ("bar", np.array([0, 1], dtype=dtype))})
     foo = foo.stack(baz=("bar",))
     assert str(foo["bar"].values.dtype) == dtype
+
+
+class IndexWithExtraVariables(Index):
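+    """Custom index whose create_variables() also returns a derived 'valid_time'
+    variable (time + 1); used to exercise set_xindex error handling and the
+    Coordinates.from_xindex factory pattern."""
+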
+    @classmethod
+    def from_variables(cls, variables, *, options=None):
+        return cls()
+
+    def create_variables(self, variables=None):
+        if variables is None:
+            # For Coordinates.from_xindex(), return all variables the index can create
+            return {
+                "time": Variable(dims=("time",), data=[1, 2, 3]),
+                "valid_time": Variable(
+                    dims=("time",),
+                    data=[2, 3, 4],  # time + 1
+                    attrs={"description": "time + 1"},
+                ),
+            }
+
+        result = dict(variables)
+        if "time" in variables:
+            result["valid_time"] = Variable(
+                dims=("time",),
+                data=variables["time"].data + 1,
+                attrs={"description": "time + 1"},
+            )
+        return result
+
+
+def test_set_xindex_with_extra_variables() -> None:
+    """Test that set_xindex raises an error when custom index creates extra variables."""
+
+    ds = xr.Dataset(coords={"time": [1, 2, 3]}).reset_index("time")
+
+    # Test that set_xindex raises error for extra variables
+    with pytest.raises(ValueError, match="extra variables 'valid_time'"):
+        ds.set_xindex("time", IndexWithExtraVariables)
+
+
+def test_set_xindex_factory_method_pattern() -> None:
+    ds = xr.Dataset(coords={"time": [1, 2, 3]}).reset_index("time")
+
+    # Test the recommended factory method pattern
+    coord_vars = {"time": ds._variables["time"]}
+    index = IndexWithExtraVariables.from_variables(coord_vars)
+    coords = xr.Coordinates.from_xindex(index)
+    result = ds.assign_coords(coords)
+
+    assert "time" in result.variables
+    assert "valid_time" in result.variables
+    assert_array_equal(result.valid_time.data, result.time.data + 1)
diff -pruN 2025.03.1-8/xarray/tests/test_indexing.py 2025.10.1-1/xarray/tests/test_indexing.py
--- 2025.03.1-8/xarray/tests/test_indexing.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_indexing.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 import itertools
-from typing import Any
+from typing import Any, Union
 
 import numpy as np
 import pandas as pd
@@ -129,7 +129,7 @@ class TestIndexers:
         ):
             dim_indexers = {"x": x_indexer}
             index_vars = x_index.create_variables()
-            indexes = {k: x_index for k in index_vars}
+            indexes = dict.fromkeys(index_vars, x_index)
             variables = {}
             variables.update(index_vars)
             variables.update(other_vars)
@@ -270,7 +270,7 @@ class TestIndexers:
         )  # Create a 2D DataArray
         arr = arr.expand_dims({"z": 3}, -1)  # New dimension 'z'
         arr["z"] = np.arange(3)  # New coords to dimension 'z'
-        with pytest.raises(ValueError, match="Do you want to .copy()"):
+        with pytest.raises(ValueError, match=r"Do you want to .copy()"):
             arr.loc[0, 0, 0] = 999
 
 
@@ -305,6 +305,22 @@ class TestLazyArray:
                     actual = x[new_slice]
                     assert_array_equal(expected, actual)
 
+    @pytest.mark.parametrize(
+        ["old_slice", "array", "size"],
+        (
+            (slice(None, 8), np.arange(2, 6), 10),
+            (slice(2, None), np.arange(2, 6), 10),
+            (slice(1, 10, 2), np.arange(1, 4), 15),
+            (slice(10, None, -1), np.array([2, 5, 7]), 12),
+            (slice(2, None, 2), np.array([3, -2, 5, -1]), 13),
+            (slice(8, None), np.array([1, -2, 2, -1, -7]), 20),
+        ),
+    )
+    def test_slice_slice_by_array(self, old_slice, array, size):
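+        # composing the integer `array` indexer with `old_slice` should be
+        # equivalent to applying them one after the other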
+        actual = indexing.slice_slice_by_array(old_slice, array, size)
+        expected = np.arange(size)[old_slice][array]
+        assert_array_equal(actual, expected)
+
     def test_lazily_indexed_array(self) -> None:
         original = np.random.rand(10, 20, 30)
         x = indexing.NumpyIndexingAdapter(original)
@@ -490,6 +506,25 @@ class TestMemoryCachedArray:
         assert isinstance(child.array, indexing.NumpyIndexingAdapter)
         assert isinstance(wrapped.array, indexing.LazilyIndexedArray)
 
+    @pytest.mark.asyncio
+    async def test_async_wrapper(self) -> None:
+        original = indexing.LazilyIndexedArray(np.arange(10))
+        wrapped = indexing.MemoryCachedArray(original)
+        await wrapped.async_get_duck_array()
+        assert_array_equal(wrapped, np.arange(10))
+        assert isinstance(wrapped.array, indexing.NumpyIndexingAdapter)
+
+    @pytest.mark.asyncio
+    async def test_async_sub_array(self) -> None:
+        original = indexing.LazilyIndexedArray(np.arange(10))
+        wrapped = indexing.MemoryCachedArray(original)
+        child = wrapped[B[:5]]
+        assert isinstance(child, indexing.MemoryCachedArray)
+        await child.async_get_duck_array()
+        assert_array_equal(child, np.arange(5))
+        assert isinstance(child.array, indexing.NumpyIndexingAdapter)
+        assert isinstance(wrapped.array, indexing.LazilyIndexedArray)
+
     def test_setitem(self) -> None:
         original = np.arange(10)
         wrapped = indexing.MemoryCachedArray(original)
@@ -533,6 +568,10 @@ def test_invalid_for_all(indexer_cls) ->
         indexer_cls((slice("foo"),))
     with pytest.raises(TypeError):
         indexer_cls((np.array(["foo"]),))
+    with pytest.raises(TypeError):
+        indexer_cls(True)
+    with pytest.raises(TypeError):
+        indexer_cls(np.array(True))
 
 
 def check_integer(indexer_cls):
@@ -603,7 +642,7 @@ class Test_vectorized_indexer:
 
     def test_arrayize_vectorized_indexer(self) -> None:
         for i, j, k in itertools.product(self.indexers, repeat=3):
-            vindex = indexing.VectorizedIndexer((i, j, k))
+            vindex = indexing.VectorizedIndexer((i, j, k))  # type: ignore[arg-type]
             vindex_array = indexing._arrayize_vectorized_indexer(
                 vindex, self.data.shape
             )
@@ -637,46 +676,58 @@ class Test_vectorized_indexer:
         np.testing.assert_array_equal(b, np.arange(5)[:, np.newaxis])
 
 
-def get_indexers(shape, mode):
+def get_indexers(
+    shape: tuple[int, ...], mode: str
+) -> Union[indexing.VectorizedIndexer, indexing.OuterIndexer, indexing.BasicIndexer]:
     if mode == "vectorized":
         indexed_shape = (3, 4)
-        indexer = tuple(np.random.randint(0, s, size=indexed_shape) for s in shape)
-        return indexing.VectorizedIndexer(indexer)
+        indexer_v = tuple(np.random.randint(0, s, size=indexed_shape) for s in shape)
+        return indexing.VectorizedIndexer(indexer_v)
 
     elif mode == "outer":
-        indexer = tuple(np.random.randint(0, s, s + 2) for s in shape)
-        return indexing.OuterIndexer(indexer)
+        indexer_o = tuple(np.random.randint(0, s, s + 2) for s in shape)
+        return indexing.OuterIndexer(indexer_o)
 
     elif mode == "outer_scalar":
-        indexer = (np.random.randint(0, 3, 4), 0, slice(None, None, 2))
-        return indexing.OuterIndexer(indexer[: len(shape)])
+        indexer_os: tuple[Any, ...] = (
+            np.random.randint(0, 3, 4),
+            0,
+            slice(None, None, 2),
+        )
+        return indexing.OuterIndexer(indexer_os[: len(shape)])
 
     elif mode == "outer_scalar2":
-        indexer = (np.random.randint(0, 3, 4), -2, slice(None, None, 2))
-        return indexing.OuterIndexer(indexer[: len(shape)])
+        indexer_os2: tuple[Any, ...] = (
+            np.random.randint(0, 3, 4),
+            -2,
+            slice(None, None, 2),
+        )
+        return indexing.OuterIndexer(indexer_os2[: len(shape)])
 
     elif mode == "outer1vec":
-        indexer = [slice(2, -3) for s in shape]
-        indexer[1] = np.random.randint(0, shape[1], shape[1] + 2)
-        return indexing.OuterIndexer(tuple(indexer))
+        indexer_o1v: list[Any] = [slice(2, -3) for s in shape]
+        indexer_o1v[1] = np.random.randint(0, shape[1], shape[1] + 2)
+        return indexing.OuterIndexer(tuple(indexer_o1v))
 
     elif mode == "basic":  # basic indexer
-        indexer = [slice(2, -3) for s in shape]
-        indexer[0] = 3
-        return indexing.BasicIndexer(tuple(indexer))
+        indexer_b: list[Any] = [slice(2, -3) for s in shape]
+        indexer_b[0] = 3
+        return indexing.BasicIndexer(tuple(indexer_b))
 
     elif mode == "basic1":  # basic indexer
         return indexing.BasicIndexer((3,))
 
     elif mode == "basic2":  # basic indexer
-        indexer = [0, 2, 4]
-        return indexing.BasicIndexer(tuple(indexer[: len(shape)]))
+        indexer_b2 = [0, 2, 4]
+        return indexing.BasicIndexer(tuple(indexer_b2[: len(shape)]))
 
     elif mode == "basic3":  # basic indexer
-        indexer = [slice(None) for s in shape]
-        indexer[0] = slice(-2, 2, -2)
-        indexer[1] = slice(1, -1, 2)
-        return indexing.BasicIndexer(tuple(indexer[: len(shape)]))
+        indexer_b3: list[Any] = [slice(None) for s in shape]
+        indexer_b3[0] = slice(-2, 2, -2)
+        indexer_b3[1] = slice(1, -1, 2)
+        return indexing.BasicIndexer(tuple(indexer_b3[: len(shape)]))
+
+    raise ValueError(f"Unknown mode: {mode}")
 
 
 @pytest.mark.parametrize("size", [100, 99])
diff -pruN 2025.03.1-8/xarray/tests/test_interp.py 2025.10.1-1/xarray/tests/test_interp.py
--- 2025.03.1-8/xarray/tests/test_interp.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_interp.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import contextlib
 from itertools import combinations, permutations, product
 from typing import cast, get_args
 
@@ -29,10 +30,8 @@ from xarray.tests import (
 )
 from xarray.tests.test_dataset import create_test_data
 
-try:
+with contextlib.suppress(ImportError):
     import scipy
-except ImportError:
-    pass
 
 ALL_1D = get_args(Interp1dOptions) + get_args(InterpolantOptions)
 
@@ -125,7 +124,7 @@ def test_interpolate_1d(method: InterpOp
     if not has_scipy:
         pytest.skip("scipy is not installed.")
 
-    if not has_dask and case in [1]:
+    if not has_dask and case == 1:
         pytest.skip("dask is not installed in the environment.")
 
     da = get_example_data(case)
@@ -140,7 +139,7 @@ def test_interpolate_1d(method: InterpOp
             axis=obj.get_axis_num(dim),
             bounds_error=False,
             fill_value=np.nan,
-            kind=method,
+            kind=method,  # type: ignore[arg-type,unused-ignore]
         )(new_x)
 
     if dim == "x":
@@ -171,7 +170,7 @@ def test_interpolate_1d_methods(method:
             axis=obj.get_axis_num(dim),
             bounds_error=False,
             fill_value=np.nan,
-            kind=method,
+            kind=method,  # type: ignore[arg-type,unused-ignore]
         )(new_x)
 
     coords = {"x": xdest, "y": da["y"], "x2": ("x", func(da["x2"], xdest))}
@@ -230,7 +229,7 @@ def test_interpolate_vectorize(use_dask:
                 da[dim],
                 obj.data,
                 axis=obj.get_axis_num(dim),
-                kind=method,
+                kind=method,  # type: ignore[arg-type,unused-ignore]
                 bounds_error=False,
                 fill_value=np.nan,
                 **scipy_kwargs,
@@ -339,7 +338,7 @@ def test_interpolate_nd(case: int, metho
     zdest = nd_interp_coords["zdest"]
     grid_oned_points = nd_interp_coords["grid_oned_points"]
     actual = da.interp(x=xdest, y=ydest, z=zdest, method=method)
-    expected_data = scipy.interpolate.interpn(
+    expected_data_1d: np.ndarray = scipy.interpolate.interpn(
         points=(da.x, da.y, da.z),
         values=da.data,
         xi=grid_oned_points,
@@ -347,7 +346,7 @@ def test_interpolate_nd(case: int, metho
         bounds_error=False,
     ).reshape([len(xdest), len(zdest)])
     expected = xr.DataArray(
-        expected_data,
+        expected_data_1d,
         dims=["y", "z"],
         coords={
             "y": ydest,
@@ -434,7 +433,7 @@ def test_interpolate_nd_with_nan() -> No
     "case", [pytest.param(0, id="no_chunk"), pytest.param(1, id="chunk_y")]
 )
 def test_interpolate_scalar(method: InterpOptions, case: int) -> None:
-    if not has_dask and case in [1]:
+    if not has_dask and case == 1:
         pytest.skip("dask is not installed in the environment.")
 
     da = get_example_data(case)
@@ -450,7 +449,7 @@ def test_interpolate_scalar(method: Inte
             axis=obj.get_axis_num("x"),
             bounds_error=False,
             fill_value=np.nan,
-            kind=method,
+            kind=method,  # type: ignore[arg-type,unused-ignore]
         )(new_x)
 
     coords = {"x": xdest, "y": da["y"], "x2": func(da["x2"], xdest)}
@@ -464,7 +463,7 @@ def test_interpolate_scalar(method: Inte
     "case", [pytest.param(3, id="no_chunk"), pytest.param(4, id="chunked")]
 )
 def test_interpolate_nd_scalar(method: InterpOptions, case: int) -> None:
-    if not has_dask and case in [4]:
+    if not has_dask and case == 4:
         pytest.skip("dask is not installed in the environment.")
 
     da = get_example_data(case)
@@ -477,7 +476,7 @@ def test_interpolate_nd_scalar(method: I
     expected_data = scipy.interpolate.RegularGridInterpolator(
         (da["x"], da["y"], da["z"]),
         da.transpose("x", "y", "z").values,
-        method=method,
+        method=method,  # type: ignore[arg-type,unused-ignore]
         bounds_error=False,
         fill_value=np.nan,
     )(np.asarray([(xdest, ydest, z_val) for z_val in zdest]))
@@ -1067,6 +1066,28 @@ def test_interp1d_complex_out_of_bounds(
 
 
 @requires_scipy
+def test_interp_non_numeric_scalar() -> None:
+    ds = xr.Dataset(
+        {
+            "non_numeric": ("time", np.array(["a"])),
+        },
+        coords={"time": (np.array([0]))},
+    )
+    actual = ds.interp(time=np.linspace(0, 3, 3))
+
+    expected = xr.Dataset(
+        {
+            "non_numeric": ("time", np.array(["a", "a", "a"])),
+        },
+        coords={"time": np.linspace(0, 3, 3)},
+    )
+    xr.testing.assert_identical(actual, expected)
+
+    # Make sure the array is a copy:
+    assert actual["non_numeric"].data.base is None
+
+
+@requires_scipy
 def test_interp_non_numeric_1d() -> None:
     ds = xr.Dataset(
         {
diff -pruN 2025.03.1-8/xarray/tests/test_merge.py 2025.10.1-1/xarray/tests/test_merge.py
--- 2025.03.1-8/xarray/tests/test_merge.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_merge.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,10 +1,14 @@
 from __future__ import annotations
 
+import warnings
+
 import numpy as np
+import pandas as pd
 import pytest
 
 import xarray as xr
 from xarray.core import dtypes
+from xarray.core.options import set_options
 from xarray.structure import merge
 from xarray.structure.merge import MergeError
 from xarray.testing import assert_equal, assert_identical
@@ -37,15 +41,17 @@ class TestMergeFunction:
         expected = data[["var1", "var2"]]
         assert_identical(actual, expected)
 
-    def test_merge_datasets(self):
-        data = create_test_data(add_attrs=False, use_extension_array=True)
+    @pytest.mark.parametrize("use_new_combine_kwarg_defaults", [True, False])
+    def test_merge_datasets(self, use_new_combine_kwarg_defaults):
+        with set_options(use_new_combine_kwarg_defaults=use_new_combine_kwarg_defaults):
+            data = create_test_data(add_attrs=False, use_extension_array=True)
 
-        actual = xr.merge([data[["var1"]], data[["var2"]]])
-        expected = data[["var1", "var2"]]
-        assert_identical(actual, expected)
+            actual = xr.merge([data[["var1"]], data[["var2"]]])
+            expected = data[["var1", "var2"]]
+            assert_identical(actual, expected)
 
-        actual = xr.merge([data, data])
-        assert_identical(actual, data)
+            actual = xr.merge([data, data])
+            assert_identical(actual, data)
 
     def test_merge_dataarray_unnamed(self):
         data = xr.DataArray([1, 2], dims="x")
@@ -183,18 +189,26 @@ class TestMergeFunction:
         self, combine_attrs, attrs1, attrs2, expected_attrs, expect_exception
     ):
         """check that combine_attrs is used on data variables and coords"""
+        input_attrs1 = attrs1.copy()
         data1 = xr.Dataset(
             {"var1": ("dim1", [], attrs1)}, coords={"dim1": ("dim1", [], attrs1)}
         )
+        input_attrs2 = attrs2.copy()
         data2 = xr.Dataset(
             {"var1": ("dim1", [], attrs2)}, coords={"dim1": ("dim1", [], attrs2)}
         )
 
         if expect_exception:
             with pytest.raises(MergeError, match="combine_attrs"):
-                actual = xr.merge([data1, data2], combine_attrs=combine_attrs)
+                with pytest.warns(
+                    FutureWarning,
+                    match="will change from compat='no_conflicts' to compat='override'",
+                ):
+                    actual = xr.merge([data1, data2], combine_attrs=combine_attrs)
         else:
-            actual = xr.merge([data1, data2], combine_attrs=combine_attrs)
+            actual = xr.merge(
+                [data1, data2], compat="no_conflicts", combine_attrs=combine_attrs
+            )
             expected = xr.Dataset(
                 {"var1": ("dim1", [], expected_attrs)},
                 coords={"dim1": ("dim1", [], expected_attrs)},
@@ -202,6 +216,12 @@ class TestMergeFunction:
 
             assert_identical(actual, expected)
 
+            # Check also that input attributes weren't modified
+            assert data1["var1"].attrs == input_attrs1
+            assert data1.coords["dim1"].attrs == input_attrs1
+            assert data2["var1"].attrs == input_attrs2
+            assert data2.coords["dim1"].attrs == input_attrs2
+
     def test_merge_attrs_override_copy(self):
         ds1 = xr.Dataset(attrs={"x": 0})
         ds2 = xr.Dataset(attrs={"x": 1})
@@ -218,6 +238,267 @@ class TestMergeFunction:
         expected = xr.Dataset(attrs={"a": 0, "d": 0, "e": 0})
         assert_identical(actual, expected)
 
+    def test_merge_attrs_drop_conflicts_numpy_arrays(self):
+        """Test drop_conflicts with numpy arrays."""
+        # Test with numpy arrays (which return arrays from ==)
+        arr1 = np.array([1, 2, 3])
+        arr2 = np.array([1, 2, 3])
+        arr3 = np.array([4, 5, 6])
+
+        ds1 = xr.Dataset(attrs={"arr": arr1, "scalar": 1})
+        ds2 = xr.Dataset(attrs={"arr": arr2, "scalar": 1})  # Same array values
+        ds3 = xr.Dataset(attrs={"arr": arr3, "other": 2})  # Different array values
+
+        # Arrays are considered equivalent if they have the same values
+        actual = xr.merge([ds1, ds2], combine_attrs="drop_conflicts")
+        assert "arr" in actual.attrs  # Should keep the array since they're equivalent
+        assert actual.attrs["scalar"] == 1
+
+        # Different arrays cause the attribute to be dropped
+        actual = xr.merge([ds1, ds3], combine_attrs="drop_conflicts")
+        assert "arr" not in actual.attrs  # Should drop due to conflict
+        assert "other" in actual.attrs
+
+    def test_merge_attrs_drop_conflicts_custom_eq_returns_array(self):
+        """Test drop_conflicts with custom objects that return arrays from __eq__."""
+
+        # Test with custom objects that return non-bool from __eq__
+        class CustomEq:
+            """Object whose __eq__ returns a non-bool value."""
+
+            def __init__(self, value):
+                self.value = value
+
+            def __eq__(self, other):
+                if not isinstance(other, CustomEq):
+                    return False
+                # Return a numpy array (truthy if all elements are non-zero)
+                return np.array([self.value == other.value])
+
+            def __repr__(self):
+                return f"CustomEq({self.value})"
+
+        obj1 = CustomEq(42)
+        obj2 = CustomEq(42)  # Same value
+        obj3 = CustomEq(99)  # Different value
+
+        ds4 = xr.Dataset(attrs={"custom": obj1, "x": 1})
+        ds5 = xr.Dataset(attrs={"custom": obj2, "x": 1})
+        ds6 = xr.Dataset(attrs={"custom": obj3, "y": 2})
+
+        # Suppress DeprecationWarning from numpy < 2.0 about ambiguous truth values
+        # when our custom __eq__ returns arrays that are evaluated in boolean context
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", category=DeprecationWarning)
+
+            # Objects returning arrays are dropped (non-boolean return)
+            actual = xr.merge([ds4, ds5], combine_attrs="drop_conflicts")
+            assert "custom" not in actual.attrs  # Dropped - returns array, not bool
+            assert actual.attrs["x"] == 1
+
+            # Different values also dropped (returns array, not bool)
+            actual = xr.merge([ds4, ds6], combine_attrs="drop_conflicts")
+            assert "custom" not in actual.attrs  # Dropped - returns non-boolean
+            assert actual.attrs["x"] == 1
+            assert actual.attrs["y"] == 2
+
+    def test_merge_attrs_drop_conflicts_ambiguous_array_returns(self):
+        """Test drop_conflicts with objects returning ambiguous arrays from __eq__."""
+
+        # Test edge case: object whose __eq__ returns empty array (ambiguous truth value)
+        class EmptyArrayEq:
+            def __eq__(self, other):
+                if not isinstance(other, EmptyArrayEq):
+                    return False
+                return np.array([])  # Empty array has ambiguous truth value
+
+            def __repr__(self):
+                return "EmptyArrayEq()"
+
+        empty_obj1 = EmptyArrayEq()
+        empty_obj2 = EmptyArrayEq()
+
+        ds7 = xr.Dataset(attrs={"empty": empty_obj1})
+        ds8 = xr.Dataset(attrs={"empty": empty_obj2})
+
+        # With new behavior: ambiguous truth values are treated as non-equivalent
+        # So the attribute is dropped instead of raising an error
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", category=DeprecationWarning)
+            actual = xr.merge([ds7, ds8], combine_attrs="drop_conflicts")
+            assert "empty" not in actual.attrs  # Dropped due to ambiguous comparison
+
+        # Test with object that returns multi-element array (also ambiguous)
+        class MultiArrayEq:
+            def __eq__(self, other):
+                if not isinstance(other, MultiArrayEq):
+                    return False
+                return np.array([True, False])  # Multi-element array is ambiguous
+
+            def __repr__(self):
+                return "MultiArrayEq()"
+
+        multi_obj1 = MultiArrayEq()
+        multi_obj2 = MultiArrayEq()
+
+        ds9 = xr.Dataset(attrs={"multi": multi_obj1})
+        ds10 = xr.Dataset(attrs={"multi": multi_obj2})
+
+        # With new behavior: ambiguous arrays are treated as non-equivalent
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", category=DeprecationWarning)
+            actual = xr.merge([ds9, ds10], combine_attrs="drop_conflicts")
+            assert "multi" not in actual.attrs  # Dropped due to ambiguous comparison
+
+    def test_merge_attrs_drop_conflicts_all_true_array(self):
+        """Test drop_conflicts with all-True multi-element array from __eq__."""
+
+        # Test with all-True multi-element array (unambiguous truthy)
+        class AllTrueArrayEq:
+            def __eq__(self, other):
+                if not isinstance(other, AllTrueArrayEq):
+                    return False
+                return np.array([True, True, True])  # All True, but still multi-element
+
+            def __repr__(self):
+                return "AllTrueArrayEq()"
+
+        alltrue1 = AllTrueArrayEq()
+        alltrue2 = AllTrueArrayEq()
+
+        ds11 = xr.Dataset(attrs={"alltrue": alltrue1})
+        ds12 = xr.Dataset(attrs={"alltrue": alltrue2})
+
+        # Multi-element arrays are ambiguous even if all True
+        actual = xr.merge([ds11, ds12], combine_attrs="drop_conflicts")
+        assert "alltrue" not in actual.attrs  # Dropped due to ambiguous comparison
+
+    def test_merge_attrs_drop_conflicts_nested_arrays(self):
+        """Test drop_conflicts with NumPy object arrays containing nested arrays."""
+        # NumPy object arrays with nested arrays can have complex
+        # comparison behavior
+        x = np.array([None], dtype=object)
+        x[0] = np.arange(3)
+        y = np.array([None], dtype=object)
+        y[0] = np.arange(10, 13)
+
+        ds1 = xr.Dataset(attrs={"nested_array": x, "common": 1})
+        ds2 = xr.Dataset(attrs={"nested_array": y, "common": 1})
+
+        # Different nested arrays should cause attribute to be dropped
+        actual = xr.merge([ds1, ds2], combine_attrs="drop_conflicts")
+        assert (
+            "nested_array" not in actual.attrs
+        )  # Dropped due to different nested arrays
+        assert actual.attrs["common"] == 1
+
+        # Test with identical nested arrays
+        # Note: even identical nested arrays are dropped, because comparing them
+        # raises a ValueError due to an ambiguous truth value
+        z = np.array([None], dtype=object)
+        z[0] = np.arange(3)  # Same as x
+        ds3 = xr.Dataset(attrs={"nested_array": z, "other": 2})
+
+        actual = xr.merge([ds1, ds3], combine_attrs="drop_conflicts")
+        assert (
+            "nested_array" not in actual.attrs
+        )  # Dropped due to ValueError in comparison
+        assert actual.attrs["other"] == 2
+
+    def test_merge_attrs_drop_conflicts_dataset_attrs(self):
+        """Test drop_conflicts with xarray.Dataset objects as attributes."""
+        # xarray.Dataset objects as attributes (raises TypeError in equivalent)
+        attr_ds1 = xr.Dataset({"foo": 1})
+        attr_ds2 = xr.Dataset({"bar": 1})  # Different dataset
+        attr_ds3 = xr.Dataset({"foo": 1})  # Same as attr_ds1
+
+        ds4 = xr.Dataset(attrs={"dataset_attr": attr_ds1, "scalar": 42})
+        ds5 = xr.Dataset(attrs={"dataset_attr": attr_ds2, "scalar": 42})
+        ds6 = xr.Dataset(attrs={"dataset_attr": attr_ds3, "other": 99})
+
+        # Different datasets raise TypeError and should be dropped
+        actual = xr.merge([ds4, ds5], combine_attrs="drop_conflicts")
+        assert "dataset_attr" not in actual.attrs  # Dropped due to TypeError
+        assert actual.attrs["scalar"] == 42
+
+        # Identical datasets are also dropped (comparison returns Dataset, not bool)
+        actual = xr.merge([ds4, ds6], combine_attrs="drop_conflicts")
+        assert "dataset_attr" not in actual.attrs  # Dropped - returns Dataset, not bool
+        assert actual.attrs["other"] == 99
+
+    def test_merge_attrs_drop_conflicts_pandas_series(self):
+        """Test drop_conflicts with Pandas Series as attributes."""
+        # Pandas Series (comparison raises a ValueError due to an ambiguous truth value)
+        series1 = pd.Series([1, 2])
+        series2 = pd.Series([3, 4])  # Different values
+        series3 = pd.Series([1, 2])  # Same as series1
+
+        ds7 = xr.Dataset(attrs={"series": series1, "value": "a"})
+        ds8 = xr.Dataset(attrs={"series": series2, "value": "a"})
+        ds9 = xr.Dataset(attrs={"series": series3, "value": "a"})
+
+        # Suppress potential warnings from pandas comparisons
+        with warnings.catch_warnings():
+            warnings.filterwarnings("ignore", category=DeprecationWarning)
+            warnings.filterwarnings("ignore", category=FutureWarning)
+
+            # Different series raise ValueError and get dropped
+            actual = xr.merge([ds7, ds8], combine_attrs="drop_conflicts")
+            assert "series" not in actual.attrs  # Dropped due to ValueError
+            assert actual.attrs["value"] == "a"
+
+            # Even identical series raise a ValueError in equivalent() and get dropped,
+            # because comparing Series returns another Series with an ambiguous truth value
+            actual = xr.merge([ds7, ds9], combine_attrs="drop_conflicts")
+            assert "series" not in actual.attrs  # Dropped due to ValueError
+            assert actual.attrs["value"] == "a"
+
+    def test_merge_attrs_drop_conflicts_eq_returns_string(self):
+        """Test objects whose __eq__ returns strings are dropped."""
+
+        # Case 1: Objects whose __eq__ returns non-boolean strings
+        class ReturnsString:
+            def __init__(self, value):
+                self.value = value
+
+            def __eq__(self, other):
+                # Always returns a string (non-boolean)
+                return "comparison result"
+
+        obj1 = ReturnsString("A")
+        obj2 = ReturnsString("B")  # Different object
+
+        ds1 = xr.Dataset(attrs={"obj": obj1})
+        ds2 = xr.Dataset(attrs={"obj": obj2})
+
+        actual = xr.merge([ds1, ds2], combine_attrs="drop_conflicts")
+
+        # Strict behavior: drops attribute because __eq__ returns non-boolean
+        assert "obj" not in actual.attrs
+
+    def test_merge_attrs_drop_conflicts_eq_returns_number(self):
+        """Test objects whose __eq__ returns numbers are dropped."""
+
+        # Case 2: Objects whose __eq__ returns numbers
+        class ReturnsZero:
+            def __init__(self, value):
+                self.value = value
+
+            def __eq__(self, other):
+                # Always returns 0 (non-boolean)
+                return 0
+
+        obj3 = ReturnsZero("same")
+        obj4 = ReturnsZero("same")  # Different object, same value
+
+        ds3 = xr.Dataset(attrs={"zero": obj3})
+        ds4 = xr.Dataset(attrs={"zero": obj4})
+
+        actual = xr.merge([ds3, ds4], combine_attrs="drop_conflicts")
+
+        # Strict behavior: drops attribute because __eq__ returns non-boolean
+        assert "zero" not in actual.attrs
+
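+    def test_merge_attrs_drop_conflicts_plain_values_sketch(self):
+        """Minimal sketch with plain hashable attrs (illustrative values),
+        contrasting with the exotic __eq__ cases above: equal values are kept,
+        conflicting values are silently dropped, unique keys are kept."""
+        a = xr.Dataset(attrs={"units": "m", "note": "old", "source": "a"})
+        b = xr.Dataset(attrs={"units": "m", "note": "new"})
+        actual = xr.merge([a, b], combine_attrs="drop_conflicts")
+        assert actual.attrs == {"units": "m", "source": "a"}
+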
     def test_merge_attrs_no_conflicts_compat_minimal(self):
         """make sure compat="minimal" does not silence errors"""
         ds1 = xr.Dataset({"a": ("x", [], {"a": 0})})
@@ -256,19 +537,23 @@ class TestMergeFunction:
 
     def test_merge_wrong_input_error(self):
         with pytest.raises(TypeError, match=r"objects must be an iterable"):
-            xr.merge([1])
+            xr.merge([1])  # type: ignore[list-item]
         ds = xr.Dataset(coords={"x": [1, 2]})
         with pytest.raises(TypeError, match=r"objects must be an iterable"):
-            xr.merge({"a": ds})
+            xr.merge({"a": ds})  # type: ignore[dict-item]
         with pytest.raises(TypeError, match=r"objects must be an iterable"):
-            xr.merge([ds, 1])
+            xr.merge([ds, 1])  # type: ignore[list-item]
 
     def test_merge_no_conflicts_single_var(self):
         ds1 = xr.Dataset({"a": ("x", [1, 2]), "x": [0, 1]})
         ds2 = xr.Dataset({"a": ("x", [2, 3]), "x": [1, 2]})
         expected = xr.Dataset({"a": ("x", [1, 2, 3]), "x": [0, 1, 2]})
-        assert expected.identical(xr.merge([ds1, ds2], compat="no_conflicts"))
-        assert expected.identical(xr.merge([ds2, ds1], compat="no_conflicts"))
+        assert expected.identical(
+            xr.merge([ds1, ds2], compat="no_conflicts", join="outer")
+        )
+        assert expected.identical(
+            xr.merge([ds2, ds1], compat="no_conflicts", join="outer")
+        )
         assert ds1.identical(xr.merge([ds1, ds2], compat="no_conflicts", join="left"))
         assert ds2.identical(xr.merge([ds1, ds2], compat="no_conflicts", join="right"))
         expected = xr.Dataset({"a": ("x", [2]), "x": [1]})
@@ -278,11 +563,11 @@ class TestMergeFunction:
 
         with pytest.raises(xr.MergeError):
             ds3 = xr.Dataset({"a": ("x", [99, 3]), "x": [1, 2]})
-            xr.merge([ds1, ds3], compat="no_conflicts")
+            xr.merge([ds1, ds3], compat="no_conflicts", join="outer")
 
         with pytest.raises(xr.MergeError):
             ds3 = xr.Dataset({"a": ("y", [2, 3]), "y": [1, 2]})
-            xr.merge([ds1, ds3], compat="no_conflicts")
+            xr.merge([ds1, ds3], compat="no_conflicts", join="outer")
 
     def test_merge_no_conflicts_multi_var(self):
         data = create_test_data(add_attrs=False)
@@ -309,12 +594,12 @@ class TestMergeFunction:
 
     def test_merge_no_conflicts_broadcast(self):
         datasets = [xr.Dataset({"x": ("y", [0])}), xr.Dataset({"x": np.nan})]
-        actual = xr.merge(datasets)
+        actual = xr.merge(datasets, compat="no_conflicts")
         expected = xr.Dataset({"x": ("y", [0])})
         assert_identical(expected, actual)
 
         datasets = [xr.Dataset({"x": ("y", [np.nan])}), xr.Dataset({"x": 0})]
-        actual = xr.merge(datasets)
+        actual = xr.merge(datasets, compat="no_conflicts")
         assert_identical(expected, actual)
 
 
@@ -337,20 +622,35 @@ class TestMergeMethod:
         actual = data.merge(data.reset_coords(drop=True))
         assert_identical(data, actual)
 
-        with pytest.raises(ValueError):
+        with pytest.raises(ValueError, match="conflicting values for variable"):
             ds1.merge(ds2.rename({"var3": "var1"}))
         with pytest.raises(ValueError, match=r"should be coordinates or not"):
             data.reset_coords().merge(data)
         with pytest.raises(ValueError, match=r"should be coordinates or not"):
             data.merge(data.reset_coords())
 
-    def test_merge_broadcast_equals(self):
+    @pytest.mark.parametrize(
+        "join", ["outer", "inner", "left", "right", "exact", "override"]
+    )
+    def test_merge_drop_attrs(self, join):
+        data = create_test_data()
+        ds1 = data[["var1"]]
+        ds2 = data[["var3"]]
+        ds1.coords["dim2"].attrs["keep me"] = "example"
+        ds2.coords["numbers"].attrs["foo"] = "bar"
+        actual = ds1.merge(ds2, combine_attrs="drop", join=join)
+        assert actual.coords["dim2"].attrs == {}
+        assert actual.coords["numbers"].attrs == {}
+        assert ds1.coords["dim2"].attrs["keep me"] == "example"
+        assert ds2.coords["numbers"].attrs["foo"] == "bar"
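+        # The two assertions above double-check that combine_attrs="drop" does not
+        # mutate the attrs of the input datasets in place.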
+
+    def test_merge_compat_broadcast_equals(self):
         ds1 = xr.Dataset({"x": 0})
         ds2 = xr.Dataset({"x": ("y", [0, 0])})
-        actual = ds1.merge(ds2)
+        actual = ds1.merge(ds2, compat="broadcast_equals")
         assert_identical(ds2, actual)
 
-        actual = ds2.merge(ds1)
+        actual = ds2.merge(ds1, compat="broadcast_equals")
         assert_identical(ds2, actual)
 
         actual = ds1.copy()
@@ -359,7 +659,7 @@ class TestMergeMethod:
 
         ds1 = xr.Dataset({"x": np.nan})
         ds2 = xr.Dataset({"x": ("y", [np.nan, np.nan])})
-        actual = ds1.merge(ds2)
+        actual = ds1.merge(ds2, compat="broadcast_equals")
         assert_identical(ds2, actual)
 
     def test_merge_compat(self):
@@ -367,23 +667,24 @@ class TestMergeMethod:
         ds2 = xr.Dataset({"x": 1})
         for compat in ["broadcast_equals", "equals", "identical", "no_conflicts"]:
             with pytest.raises(xr.MergeError):
-                ds1.merge(ds2, compat=compat)
+                ds1.merge(ds2, compat=compat)  # type: ignore[arg-type]
 
         ds2 = xr.Dataset({"x": [0, 0]})
         for compat in ["equals", "identical"]:
             with pytest.raises(ValueError, match=r"should be coordinates or not"):
-                ds1.merge(ds2, compat=compat)
+                ds1.merge(ds2, compat=compat)  # type: ignore[arg-type]
 
         ds2 = xr.Dataset({"x": ((), 0, {"foo": "bar"})})
         with pytest.raises(xr.MergeError):
             ds1.merge(ds2, compat="identical")
 
         with pytest.raises(ValueError, match=r"compat=.* invalid"):
-            ds1.merge(ds2, compat="foobar")
+            ds1.merge(ds2, compat="foobar")  # type: ignore[arg-type]
 
         assert ds1.identical(ds1.merge(ds2, compat="override"))
 
     def test_merge_compat_minimal(self) -> None:
+        """Test that we drop the conflicting bar coordinate."""
         # https://github.com/pydata/xarray/issues/7405
         # https://github.com/pydata/xarray/issues/7588
         ds1 = xr.Dataset(coords={"foo": [1, 2, 3], "bar": 4})
@@ -393,14 +694,14 @@ class TestMergeMethod:
         expected = xr.Dataset(coords={"foo": [1, 2, 3]})
         assert_identical(actual, expected)
 
-    def test_merge_auto_align(self):
+    def test_merge_join_outer(self):
         ds1 = xr.Dataset({"a": ("x", [1, 2]), "x": [0, 1]})
         ds2 = xr.Dataset({"b": ("x", [3, 4]), "x": [1, 2]})
         expected = xr.Dataset(
             {"a": ("x", [1, 2, np.nan]), "b": ("x", [np.nan, 3, 4])}, {"x": [0, 1, 2]}
         )
-        assert expected.identical(ds1.merge(ds2))
-        assert expected.identical(ds2.merge(ds1))
+        assert expected.identical(ds1.merge(ds2, join="outer"))
+        assert expected.identical(ds2.merge(ds1, join="outer"))
 
         expected = expected.isel(x=slice(2))
         assert expected.identical(ds1.merge(ds2, join="left"))
@@ -428,17 +729,19 @@ class TestMergeMethod:
             {"a": ("x", [1, 2, fill_value_a]), "b": ("x", [fill_value_b, 3, 4])},
             {"x": [0, 1, 2]},
         )
-        assert expected.identical(ds1.merge(ds2, fill_value=fill_value))
-        assert expected.identical(ds2.merge(ds1, fill_value=fill_value))
-        assert expected.identical(xr.merge([ds1, ds2], fill_value=fill_value))
+        assert expected.identical(ds1.merge(ds2, join="outer", fill_value=fill_value))
+        assert expected.identical(ds2.merge(ds1, join="outer", fill_value=fill_value))
+        assert expected.identical(
+            xr.merge([ds1, ds2], join="outer", fill_value=fill_value)
+        )
 
     def test_merge_no_conflicts(self):
         ds1 = xr.Dataset({"a": ("x", [1, 2]), "x": [0, 1]})
         ds2 = xr.Dataset({"a": ("x", [2, 3]), "x": [1, 2]})
         expected = xr.Dataset({"a": ("x", [1, 2, 3]), "x": [0, 1, 2]})
 
-        assert expected.identical(ds1.merge(ds2, compat="no_conflicts"))
-        assert expected.identical(ds2.merge(ds1, compat="no_conflicts"))
+        assert expected.identical(ds1.merge(ds2, compat="no_conflicts", join="outer"))
+        assert expected.identical(ds2.merge(ds1, compat="no_conflicts", join="outer"))
 
         assert ds1.identical(ds1.merge(ds2, compat="no_conflicts", join="left"))
 
@@ -449,11 +752,11 @@ class TestMergeMethod:
 
         with pytest.raises(xr.MergeError):
             ds3 = xr.Dataset({"a": ("x", [99, 3]), "x": [1, 2]})
-            ds1.merge(ds3, compat="no_conflicts")
+            ds1.merge(ds3, compat="no_conflicts", join="outer")
 
         with pytest.raises(xr.MergeError):
             ds3 = xr.Dataset({"a": ("y", [2, 3]), "y": [1, 2]})
-            ds1.merge(ds3, compat="no_conflicts")
+            ds1.merge(ds3, compat="no_conflicts", join="outer")
 
     def test_merge_dataarray(self):
         ds = xr.Dataset({"a": 0})
@@ -491,3 +794,76 @@ class TestMergeMethod:
             actual = ds1.merge(ds2, combine_attrs=combine_attrs)
             expected = xr.Dataset(attrs=expected_attrs)
             assert_identical(actual, expected)
+
+
+class TestNewDefaults:
+    def test_merge_datasets_false_warning(self):
+        data = create_test_data(add_attrs=False, use_extension_array=True)
+
+        with set_options(use_new_combine_kwarg_defaults=False):
+            old = xr.merge([data, data])
+
+        with set_options(use_new_combine_kwarg_defaults=True):
+            new = xr.merge([data, data])
+
+        assert_identical(old, new)
+
+    def test_merge(self):
+        data = create_test_data()
+        ds1 = data[["var1"]]
+        ds2 = data[["var3"]]
+        expected = data[["var1", "var3"]]
+        with set_options(use_new_combine_kwarg_defaults=True):
+            actual = ds1.merge(ds2)
+            assert_identical(expected, actual)
+
+            actual = ds2.merge(ds1)
+            assert_identical(expected, actual)
+
+            actual = data.merge(data)
+            assert_identical(data, actual)
+
+            ds1.merge(ds2.rename({"var3": "var1"}))
+
+            with pytest.raises(ValueError, match=r"should be coordinates or not"):
+                data.reset_coords().merge(data)
+            with pytest.raises(ValueError, match=r"should be coordinates or not"):
+                data.merge(data.reset_coords())
+
+    def test_merge_broadcast_equals(self):
+        ds1 = xr.Dataset({"x": 0})
+        ds2 = xr.Dataset({"x": ("y", [0, 0])})
+
+        with set_options(use_new_combine_kwarg_defaults=False):
+            with pytest.warns(
+                FutureWarning,
+                match="will change from compat='no_conflicts' to compat='override'",
+            ):
+                old = ds1.merge(ds2)
+
+        with set_options(use_new_combine_kwarg_defaults=True):
+            new = ds1.merge(ds2)
+
+        assert_identical(ds2, old)
+        with pytest.raises(AssertionError):
+            assert_identical(old, new)
+
+    def test_merge_auto_align(self):
+        ds1 = xr.Dataset({"a": ("x", [1, 2]), "x": [0, 1]})
+        ds2 = xr.Dataset({"b": ("x", [3, 4]), "x": [1, 2]})
+        expected = xr.Dataset(
+            {"a": ("x", [1, 2, np.nan]), "b": ("x", [np.nan, 3, 4])}, {"x": [0, 1, 2]}
+        )
+        with set_options(use_new_combine_kwarg_defaults=False):
+            with pytest.warns(
+                FutureWarning, match="will change from join='outer' to join='exact'"
+            ):
+                assert expected.identical(ds1.merge(ds2))
+            with pytest.warns(
+                FutureWarning, match="will change from join='outer' to join='exact'"
+            ):
+                assert expected.identical(ds2.merge(ds1))
+
+        with set_options(use_new_combine_kwarg_defaults=True):
+            with pytest.raises(ValueError, match="might be related to new default"):
+                expected.identical(ds2.merge(ds1))
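+
+    def test_merge_new_defaults_aligned_sketch(self):
+        """Minimal sketch (illustrative): with the new defaults (compat="override",
+        join="exact"), merging datasets whose indexes already align needs no
+        explicit join="outer"."""
+        ds1 = xr.Dataset({"a": ("x", [1, 2]), "x": [0, 1]})
+        ds2 = xr.Dataset({"b": ("x", [3, 4]), "x": [0, 1]})
+        with set_options(use_new_combine_kwarg_defaults=True):
+            actual = ds1.merge(ds2)
+        expected = xr.Dataset({"a": ("x", [1, 2]), "b": ("x", [3, 4]), "x": [0, 1]})
+        assert_identical(expected, actual)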
diff -pruN 2025.03.1-8/xarray/tests/test_missing.py 2025.10.1-1/xarray/tests/test_missing.py
--- 2025.03.1-8/xarray/tests/test_missing.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_missing.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,12 +1,15 @@
 from __future__ import annotations
 
 import itertools
+from typing import Any
+from unittest import mock
 
 import numpy as np
 import pandas as pd
 import pytest
 
 import xarray as xr
+from xarray.core import indexing
 from xarray.core.missing import (
     NumpyInterpolator,
     ScipyInterpolator,
@@ -202,7 +205,7 @@ def test_interpolate_unsorted_index_rais
     vals = np.array([1, 2, 3], dtype=np.float64)
     expected = xr.DataArray(vals, dims="x", coords={"x": [2, 1, 3]})
     with pytest.raises(ValueError, match=r"Index 'x' must be monotonically increasing"):
-        expected.interpolate_na(dim="x", method="index")
+        expected.interpolate_na(dim="x", method="index")  # type: ignore[arg-type]
 
 
 def test_interpolate_no_dim_raises():
@@ -214,14 +217,14 @@ def test_interpolate_no_dim_raises():
 def test_interpolate_invalid_interpolator_raises():
     da = xr.DataArray(np.array([1, 2, np.nan, 5], dtype=np.float64), dims="x")
     with pytest.raises(ValueError, match=r"not a valid"):
-        da.interpolate_na(dim="x", method="foo")
+        da.interpolate_na(dim="x", method="foo")  # type: ignore[arg-type]
 
 
 def test_interpolate_duplicate_values_raises():
     data = np.random.randn(2, 3)
     da = xr.DataArray(data, coords=[("x", ["a", "a"]), ("y", [0, 1, 2])])
     with pytest.raises(ValueError, match=r"Index 'x' has duplicate values"):
-        da.interpolate_na(dim="x", method="foo")
+        da.interpolate_na(dim="x", method="foo")  # type: ignore[arg-type]
 
 
 def test_interpolate_multiindex_raises():
@@ -329,15 +332,15 @@ def test_interpolate_limits():
 @requires_scipy
 def test_interpolate_methods():
     for method in ["linear", "nearest", "zero", "slinear", "quadratic", "cubic"]:
-        kwargs = {}
+        kwargs: dict[str, Any] = {}
         da = xr.DataArray(
             np.array([0, 1, 2, np.nan, np.nan, np.nan, 6, 7, 8], dtype=np.float64),
             dims="x",
         )
-        actual = da.interpolate_na("x", method=method, **kwargs)
+        actual = da.interpolate_na("x", method=method, **kwargs)  # type: ignore[arg-type]
         assert actual.isnull().sum() == 0
 
-        actual = da.interpolate_na("x", method=method, limit=2, **kwargs)
+        actual = da.interpolate_na("x", method=method, limit=2, **kwargs)  # type: ignore[arg-type]
         assert actual.isnull().sum() == 1
 
 
@@ -772,3 +775,29 @@ def test_interpolators_complex_out_of_bo
         f = interpolator(xi, yi, method=method)
         actual = f(x)
         assert_array_equal(actual, expected)
+
+
+@requires_scipy
+def test_indexing_localize():
+    # regression test for GH10287
+    ds = xr.Dataset(
+        {
+            "sigma_a": xr.DataArray(
+                data=np.ones((16, 8, 36811)),
+                dims=["p", "t", "w"],
+                coords={"w": np.linspace(0, 30000, 36811)},
+            )
+        }
+    )
+
+    original_func = indexing.NumpyIndexingAdapter.__getitem__
+
+    def wrapper(self, indexer):
+        return original_func(self, indexer)
+
+    with mock.patch.object(
+        indexing.NumpyIndexingAdapter, "__getitem__", side_effect=wrapper, autospec=True
+    ) as mock_func:
+        ds["sigma_a"].interp(w=15000.5)
+    actual_indexer = mock_func.mock_calls[0].args[1]._key
+    assert actual_indexer == (slice(None), slice(None), slice(18404, 18408))
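+    # The assertion above is the point of the "localize" optimization exercised by
+    # this regression test: interpolating at a single target along "w" reads only a
+    # small neighbouring slice (4 samples here) of the 36811-point axis, not the
+    # whole array.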
diff -pruN 2025.03.1-8/xarray/tests/test_namedarray.py 2025.10.1-1/xarray/tests/test_namedarray.py
--- 2025.03.1-8/xarray/tests/test_namedarray.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_namedarray.py	2025-10-10 10:38:05.000000000 +0000
@@ -55,7 +55,7 @@ class CustomArray(
     CustomArrayBase[_ShapeType_co, _DType_co], Generic[_ShapeType_co, _DType_co]
 ):
     def __array__(
-        self, dtype: np.typing.DTypeLike = None, /, *, copy: bool | None = None
+        self, dtype: DTypeLike | None = None, /, *, copy: bool | None = None
     ) -> np.ndarray[Any, np.dtype[np.generic]]:
         if Version(np.__version__) >= Version("2.0.0"):
             return np.asarray(self.array, dtype=dtype, copy=copy)
@@ -292,7 +292,7 @@ class TestNamedArray(NamedArraySubclasso
             (b"foo", np.dtype("S3")),
         ],
     )
-    def test_from_array_0d_string(self, data: Any, dtype: DTypeLike) -> None:
+    def test_from_array_0d_string(self, data: Any, dtype: DTypeLike | None) -> None:
         named_array: NamedArray[Any, Any]
         named_array = from_array([], data)
         assert named_array.data == data
@@ -374,7 +374,7 @@ class TestNamedArray(NamedArraySubclasso
 
         masked_a: np.ma.MaskedArray[Any, np.dtype[np.int64]]
         masked_a = np.ma.asarray([2.1, 4], dtype=np.dtype(np.int64))  # type: ignore[no-untyped-call]
-        check_duck_array_typevar(masked_a)
+        check_duck_array_typevar(masked_a)  # type: ignore[arg-type]  # MaskedArray not in duckarray union
 
         custom_a: CustomArrayIndexable[Any, np.dtype[np.int64]]
         custom_a = CustomArrayIndexable(numpy_a)
diff -pruN 2025.03.1-8/xarray/tests/test_nd_point_index.py 2025.10.1-1/xarray/tests/test_nd_point_index.py
--- 2025.03.1-8/xarray/tests/test_nd_point_index.py	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_nd_point_index.py	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,183 @@
+import numpy as np
+import pytest
+
+import xarray as xr
+from xarray.indexes import NDPointIndex
+from xarray.tests import assert_identical
+
+pytest.importorskip("scipy")
+
+
+def test_tree_index_init() -> None:
+    from xarray.indexes.nd_point_index import ScipyKDTreeAdapter
+
+    xx, yy = np.meshgrid([1.0, 2.0], [3.0, 4.0])
+    ds = xr.Dataset(coords={"xx": (("y", "x"), xx), "yy": (("y", "x"), yy)})
+
+    ds_indexed1 = ds.set_xindex(("xx", "yy"), NDPointIndex)
+    assert "xx" in ds_indexed1.xindexes
+    assert "yy" in ds_indexed1.xindexes
+    assert isinstance(ds_indexed1.xindexes["xx"], NDPointIndex)
+    assert ds_indexed1.xindexes["xx"] is ds_indexed1.xindexes["yy"]
+
+    ds_indexed2 = ds.set_xindex(
+        ("xx", "yy"), NDPointIndex, tree_adapter_cls=ScipyKDTreeAdapter
+    )
+    assert ds_indexed1.xindexes["xx"].equals(ds_indexed2.xindexes["yy"])
+
+
+def test_tree_index_init_errors() -> None:
+    xx, yy = np.meshgrid([1.0, 2.0], [3.0, 4.0])
+    ds = xr.Dataset(coords={"xx": (("y", "x"), xx), "yy": (("y", "x"), yy)})
+
+    with pytest.raises(ValueError, match="number of variables"):
+        ds.set_xindex("xx", NDPointIndex)
+
+    ds2 = ds.assign_coords(yy=(("u", "v"), [[3.0, 3.0], [4.0, 4.0]]))
+
+    with pytest.raises(ValueError, match="same dimensions"):
+        ds2.set_xindex(("xx", "yy"), NDPointIndex)
+
+
+def test_tree_index_sel() -> None:
+    xx, yy = np.meshgrid([1.0, 2.0], [3.0, 4.0])
+    ds = xr.Dataset(coords={"xx": (("y", "x"), xx), "yy": (("y", "x"), yy)}).set_xindex(
+        ("xx", "yy"), NDPointIndex
+    )
+
+    # 1-dimensional labels
+    actual = ds.sel(
+        xx=xr.Variable("u", [1.1, 1.1, 1.1]),
+        yy=xr.Variable("u", [3.1, 3.1, 3.1]),
+        method="nearest",
+    )
+    expected = xr.Dataset(
+        coords={"xx": ("u", [1.0, 1.0, 1.0]), "yy": ("u", [3.0, 3.0, 3.0])}
+    )
+    assert_identical(actual, expected)
+
+    # 2-dimensional labels
+    actual = ds.sel(
+        xx=xr.Variable(("u", "v"), [[1.1, 1.1, 1.1], [1.9, 1.9, 1.9]]),
+        yy=xr.Variable(("u", "v"), [[3.1, 3.1, 3.1], [3.9, 3.9, 3.9]]),
+        method="nearest",
+    )
+    expected = xr.Dataset(
+        coords={
+            "xx": (("u", "v"), [[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]),
+            "yy": (("u", "v"), [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]]),
+        },
+    )
+    assert_identical(actual, expected)
+
+    # all scalar labels
+    actual = ds.sel(xx=1.1, yy=3.1, method="nearest")
+    expected = xr.Dataset(coords={"xx": 1.0, "yy": 3.0})
+    assert_identical(actual, expected)
+
+    # broadcast scalar to label shape and dimensions
+    actual = ds.sel(xx=1.1, yy=xr.Variable("u", [3.1, 3.1, 3.1]), method="nearest")
+    expected = ds.sel(
+        xx=xr.Variable("u", [1.1, 1.1, 1.1]),
+        yy=xr.Variable("u", [3.1, 3.1, 3.1]),
+        method="nearest",
+    )
+    assert_identical(actual, expected)
+
+    # broadcast orthogonal 1-dimensional labels
+    actual = ds.sel(
+        xx=xr.Variable("u", [1.1, 1.1]),
+        yy=xr.Variable("v", [3.1, 3.1]),
+        method="nearest",
+    )
+    expected = xr.Dataset(
+        coords={
+            "xx": (("u", "v"), [[1.0, 1.0], [1.0, 1.0]]),
+            "yy": (("u", "v"), [[3.0, 3.0], [3.0, 3.0]]),
+        },
+    )
+    assert_identical(actual, expected)
+
+    # implicit dimension array-like labels
+    actual = ds.sel(
+        xx=[[1.1, 1.1, 1.1], [1.9, 1.9, 1.9]],
+        yy=[[3.1, 3.1, 3.1], [3.9, 3.9, 3.9]],
+        method="nearest",
+    )
+    expected = ds.sel(
+        xx=xr.Variable(ds.xx.dims, [[1.1, 1.1, 1.1], [1.9, 1.9, 1.9]]),
+        yy=xr.Variable(ds.yy.dims, [[3.1, 3.1, 3.1], [3.9, 3.9, 3.9]]),
+        method="nearest",
+    )
+    assert_identical(actual, expected)
+
+
+def test_tree_index_sel_errors() -> None:
+    xx, yy = np.meshgrid([1.0, 2.0], [3.0, 4.0])
+    ds = xr.Dataset(coords={"xx": (("y", "x"), xx), "yy": (("y", "x"), yy)}).set_xindex(
+        ("xx", "yy"), NDPointIndex
+    )
+
+    with pytest.raises(ValueError, match="method='nearest'"):
+        ds.sel(xx=1.1, yy=3.1)
+
+    with pytest.raises(ValueError, match="missing labels"):
+        ds.sel(xx=1.1, method="nearest")
+
+    with pytest.raises(ValueError, match="invalid label value"):
+        # invalid array-like dimensions
+        ds.sel(xx=[1.1, 1.9], yy=[3.1, 3.9], method="nearest")
+
+    # error while trying to broadcast labels
+    with pytest.raises(xr.AlignmentError, match=r".*conflicting dimension sizes"):
+        ds.sel(
+            xx=xr.Variable("u", [1.1, 1.1, 1.1]),
+            yy=xr.Variable("u", [3.1, 3.1]),
+            method="nearest",
+        )
+
+
+def test_tree_index_equals() -> None:
+    xx1, yy1 = np.meshgrid([1.0, 2.0], [3.0, 4.0])
+    ds1 = xr.Dataset(
+        coords={"xx": (("y", "x"), xx1), "yy": (("y", "x"), yy1)}
+    ).set_xindex(("xx", "yy"), NDPointIndex)
+
+    xx2, yy2 = np.meshgrid([1.0, 2.0], [3.0, 4.0])
+    ds2 = xr.Dataset(
+        coords={"xx": (("y", "x"), xx2), "yy": (("y", "x"), yy2)}
+    ).set_xindex(("xx", "yy"), NDPointIndex)
+
+    xx3, yy3 = np.meshgrid([10.0, 20.0], [30.0, 40.0])
+    ds3 = xr.Dataset(
+        coords={"xx": (("y", "x"), xx3), "yy": (("y", "x"), yy3)}
+    ).set_xindex(("xx", "yy"), NDPointIndex)
+
+    assert ds1.xindexes["xx"].equals(ds2.xindexes["xx"])
+    assert not ds1.xindexes["xx"].equals(ds3.xindexes["xx"])
+
+
+def test_tree_index_rename() -> None:
+    xx, yy = np.meshgrid([1.0, 2.0], [3.0, 4.0])
+    ds = xr.Dataset(coords={"xx": (("y", "x"), xx), "yy": (("y", "x"), yy)}).set_xindex(
+        ("xx", "yy"), NDPointIndex
+    )
+
+    ds_renamed = ds.rename_dims(y="u").rename_vars(yy="uu")
+    assert "uu" in ds_renamed.xindexes
+    assert isinstance(ds_renamed.xindexes["uu"], NDPointIndex)
+    assert ds_renamed.xindexes["xx"] is ds_renamed.xindexes["uu"]
+
+    # test via sel() with implicit dimension array-like labels, which relies on
+    # NDPointIndex._coord_names and NDPointIndex._dims internal attrs
+    actual = ds_renamed.sel(
+        xx=[[1.1, 1.1, 1.1], [1.9, 1.9, 1.9]],
+        uu=[[3.1, 3.1, 3.1], [3.9, 3.9, 3.9]],
+        method="nearest",
+    )
+    expected = ds_renamed.sel(
+        xx=xr.Variable(ds_renamed.xx.dims, [[1.1, 1.1, 1.1], [1.9, 1.9, 1.9]]),
+        uu=xr.Variable(ds_renamed.uu.dims, [[3.1, 3.1, 3.1], [3.9, 3.9, 3.9]]),
+        method="nearest",
+    )
+    assert_identical(actual, expected)
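+
+
+# A minimal end-to-end sketch of the workflow exercised above (illustrative
+# values): attach a single NDPointIndex to a pair of 2-d coordinates, then use
+# .sel(..., method="nearest") for point-wise nearest-neighbour selection.
+def test_tree_index_minimal_workflow_sketch() -> None:
+    xx, yy = np.meshgrid([1.0, 2.0], [3.0, 4.0])
+    ds = xr.Dataset(coords={"xx": (("y", "x"), xx), "yy": (("y", "x"), yy)}).set_xindex(
+        ("xx", "yy"), NDPointIndex
+    )
+    actual = ds.sel(xx=1.9, yy=3.9, method="nearest")
+    expected = xr.Dataset(coords={"xx": 2.0, "yy": 4.0})
+    assert_identical(actual, expected)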
diff -pruN 2025.03.1-8/xarray/tests/test_nputils.py 2025.10.1-1/xarray/tests/test_nputils.py
--- 2025.03.1-8/xarray/tests/test_nputils.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_nputils.py	2025-10-10 10:38:05.000000000 +0000
@@ -18,7 +18,7 @@ def test_vindex() -> None:
 
     # getitem
     assert_array_equal(vindex[0], x[0])
-    assert_array_equal(vindex[[1, 2], [1, 2]], x[[1, 2], [1, 2]])
+    assert_array_equal(vindex[[1, 2], [1, 2]], x[([1, 2], [1, 2])])
     assert vindex[[0, 1], [0, 1], :].shape == (2, 5)
     assert vindex[[0, 1], :, [0, 1]].shape == (2, 4)
     assert vindex[:, [0, 1], [0, 1]].shape == (2, 3)
diff -pruN 2025.03.1-8/xarray/tests/test_options.py 2025.10.1-1/xarray/tests/test_options.py
--- 2025.03.1-8/xarray/tests/test_options.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_options.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,5 +1,7 @@
 from __future__ import annotations
 
+import re
+
 import pytest
 
 import xarray
@@ -69,6 +71,19 @@ def test_nested_options() -> None:
     assert OPTIONS["display_width"] == original
 
 
+def test_netcdf_engine_order() -> None:
+    original = OPTIONS["netcdf_engine_order"]
+    with pytest.raises(
+        ValueError,
+        match=re.escape(
+            "option 'netcdf_engine_order' given an invalid value: ['invalid']. "
+            "Expected a subset of ['h5netcdf', 'netcdf4', 'scipy']"
+        ),
+    ):
+        xarray.set_options(netcdf_engine_order=["invalid"])
+    assert OPTIONS["netcdf_engine_order"] == original
+
+
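+def test_netcdf_engine_order_valid_subset() -> None:
+    # Minimal sketch (illustrative): any subset of the known engines is accepted,
+    # and the original value is restored when the context manager exits.
+    original = OPTIONS["netcdf_engine_order"]
+    with xarray.set_options(netcdf_engine_order=["h5netcdf", "netcdf4"]):
+        assert list(OPTIONS["netcdf_engine_order"]) == ["h5netcdf", "netcdf4"]
+    assert OPTIONS["netcdf_engine_order"] == original
+
+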
 def test_display_style() -> None:
     original = "html"
     assert OPTIONS["display_style"] == original
diff -pruN 2025.03.1-8/xarray/tests/test_pandas_to_xarray.py 2025.10.1-1/xarray/tests/test_pandas_to_xarray.py
--- 2025.03.1-8/xarray/tests/test_pandas_to_xarray.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_pandas_to_xarray.py	2025-10-10 10:38:05.000000000 +0000
@@ -37,6 +37,7 @@ import numpy as np
 import pandas as pd
 import pandas._testing as tm
 import pytest
+from packaging.version import Version
 from pandas import (
     Categorical,
     CategoricalIndex,
@@ -51,7 +52,7 @@ from pandas import (
     timedelta_range,
 )
 
-indices_dict = {
+indices_dict: dict[str, Index] = {
     "object": Index([f"pandas_{i}" for i in range(10)], dtype=object),
     "string": Index([f"pandas_{i}" for i in range(10)], dtype="str"),
     "datetime": date_range("2020-01-01", periods=10),
@@ -78,7 +79,7 @@ indices_dict = {
         np.arange(10, dtype="complex128") + 1.0j * np.arange(10, dtype="complex128")
     ),
     "categorical": CategoricalIndex(list("abcd") * 2),
-    "interval": IntervalIndex.from_breaks(np.linspace(0, 100, num=11)),
+    "interval": IntervalIndex.from_breaks(np.linspace(0, 100, num=11, dtype="int")),
     "empty": Index([]),
     # "tuples": MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3])),
     # "mi-with-dt64tz-level": _create_mi_with_dt64tz_level(),
@@ -107,14 +108,6 @@ def index_flat(request):
     return indices_dict[key].copy()
 
 
-@pytest.fixture
-def using_infer_string() -> bool:
-    """
-    Fixture to check if infer string option is enabled.
-    """
-    return pd.options.future.infer_string is True  # type: ignore[union-attr]
-
-
 class TestDataFrameToXArray:
     @pytest.fixture
     def df(self):
@@ -131,8 +124,7 @@ class TestDataFrameToXArray:
             }
         )
 
-    @pytest.mark.xfail(reason="needs some work")
-    def test_to_xarray_index_types(self, index_flat, df, using_infer_string):
+    def test_to_xarray_index_types(self, index_flat, df):
         index = index_flat
         # MultiIndex is tested in test_to_xarray_with_multiindex
         if len(index) == 0:
@@ -154,9 +146,6 @@ class TestDataFrameToXArray:
         # datetimes w/tz are preserved
         # column names are lost
         expected = df.copy()
-        expected["f"] = expected["f"].astype(
-            object if not using_infer_string else "str"
-        )
         expected.columns.name = None
         tm.assert_frame_equal(result.to_dataframe(), expected)
 
@@ -168,7 +157,7 @@ class TestDataFrameToXArray:
         assert result.sizes["foo"] == 0
         assert isinstance(result, Dataset)
 
-    def test_to_xarray_with_multiindex(self, df, using_infer_string):
+    def test_to_xarray_with_multiindex(self, df):
         from xarray import Dataset
 
         # MultiIndex
@@ -184,7 +173,7 @@ class TestDataFrameToXArray:
         result = result.to_dataframe()
         expected = df.copy()
         expected["f"] = expected["f"].astype(
-            object if not using_infer_string else "str"
+            object if Version(pd.__version__) < Version("3.0.0dev0") else str
         )
         expected.columns.name = None
         tm.assert_frame_equal(result, expected)
diff -pruN 2025.03.1-8/xarray/tests/test_parallelcompat.py 2025.10.1-1/xarray/tests/test_parallelcompat.py
--- 2025.03.1-8/xarray/tests/test_parallelcompat.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_parallelcompat.py	2025-10-10 10:38:05.000000000 +0000
@@ -48,7 +48,7 @@ class DummyChunkedArray(np.ndarray):
     def __array_finalize__(self, obj):
         if obj is None:
             return
-        self.chunks = getattr(obj, "chunks", None)
+        self.chunks = getattr(obj, "chunks", None)  # type: ignore[assignment]
 
     def rechunk(self, chunks, **kwargs):
         copied = self.copy()
@@ -90,7 +90,7 @@ class DummyChunkManager(ChunkManagerEntr
     def rechunk(self, data: DummyChunkedArray, chunks, **kwargs) -> DummyChunkedArray:
         return data.rechunk(chunks, **kwargs)
 
-    def compute(self, *data: DummyChunkedArray, **kwargs) -> tuple[np.ndarray, ...]:
+    def compute(self, *data: DummyChunkedArray, **kwargs) -> tuple[np.ndarray, ...]:  # type: ignore[override]
         from dask.array import compute
 
         return compute(*data, **kwargs)
@@ -164,7 +164,7 @@ class TestGetChunkManager:
     ) -> None:
         monkeypatch.setitem(KNOWN_CHUNKMANAGERS, "test", "test-package")
         with pytest.raises(
-            ImportError, match="chunk manager 'test' is not available.+test-package"
+            ImportError, match=r"chunk manager 'test' is not available.+test-package"
         ):
             guess_chunkmanager("test")
 
diff -pruN 2025.03.1-8/xarray/tests/test_plot.py 2025.10.1-1/xarray/tests/test_plot.py
--- 2025.03.1-8/xarray/tests/test_plot.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_plot.py	2025-10-10 10:38:05.000000000 +0000
@@ -50,10 +50,8 @@ try:
 except ImportError:
     pass
 
-try:
+with contextlib.suppress(ImportError):
     import cartopy
-except ImportError:
-    pass
 
 
 @contextlib.contextmanager
@@ -66,7 +64,7 @@ def figure_context(*args, **kwargs):
         plt.close("all")
 
 
-@pytest.fixture(scope="function", autouse=True)
+@pytest.fixture(autouse=True)
 def test_all_figures_closed():
     """meta-test to ensure all figures are closed at the end of a test
 
@@ -156,7 +154,7 @@ class PlotTestCase:
         plt.close("all")
 
     def pass_in_axis(self, plotmethod, subplot_kw=None) -> None:
-        fig, axs = plt.subplots(ncols=2, subplot_kw=subplot_kw, squeeze=False)
+        _fig, axs = plt.subplots(ncols=2, subplot_kw=subplot_kw, squeeze=False)
         ax = axs[0, 0]
         plotmethod(ax=ax)
         assert ax.has_data()
@@ -237,9 +235,9 @@ class TestPlot(PlotTestCase):
         z = np.arange(10)
         da = DataArray(np.cos(z), dims=["z"], coords=[z], name="f")
 
-        xy: list[list[None | str]] = [[None, None], [None, "z"], ["z", None]]
+        xy: list[list[str | None]] = [[None, None], [None, "z"], ["z", None]]
 
-        f, axs = plt.subplots(3, 1, squeeze=False)
+        _f, axs = plt.subplots(3, 1, squeeze=False)
         for aa, (x, y) in enumerate(xy):
             da.plot(x=x, y=y, ax=axs.flat[aa])  # type: ignore[call-arg]
 
@@ -333,9 +331,10 @@ class TestPlot(PlotTestCase):
         assert not plt.gca().get_legend()
         plt.cla()
         self.darray[:, :, 0].plot.line(x="dim_0", add_legend=True)
-        assert plt.gca().get_legend()
+        legend = plt.gca().get_legend()
+        assert legend is not None
         # check whether legend title is set
-        assert plt.gca().get_legend().get_title().get_text() == "dim_1"
+        assert legend.get_title().get_text() == "dim_1"
 
     def test_2d_line_accepts_x_kw(self) -> None:
         self.darray[:, :, 0].plot.line(x="dim_0")
@@ -346,10 +345,14 @@ class TestPlot(PlotTestCase):
 
     def test_2d_line_accepts_hue_kw(self) -> None:
         self.darray[:, :, 0].plot.line(hue="dim_0")
-        assert plt.gca().get_legend().get_title().get_text() == "dim_0"
+        legend = plt.gca().get_legend()
+        assert legend is not None
+        assert legend.get_title().get_text() == "dim_0"
         plt.cla()
         self.darray[:, :, 0].plot.line(hue="dim_1")
-        assert plt.gca().get_legend().get_title().get_text() == "dim_1"
+        legend = plt.gca().get_legend()
+        assert legend is not None
+        assert legend.get_title().get_text() == "dim_1"
 
     def test_2d_coords_line_plot(self) -> None:
         lon, lat = np.meshgrid(np.linspace(-20, 20, 5), np.linspace(0, 30, 4))
@@ -597,7 +600,7 @@ class TestPlot(PlotTestCase):
                 [-137.85, -120.99, -103.28, -85.28, -67.62],
             ]
         )
-        data = np.sqrt(lon**2 + lat**2)
+        data = np.hypot(lon, lat)
         da = DataArray(
             data,
             dims=("y", "x"),
@@ -830,7 +833,7 @@ class TestPlot1D(PlotTestCase):
         darray = self.darray.expand_dims({"d": np.array([10.009])})
         darray.plot.line(x="period")
         title = plt.gca().get_title()
-        assert "d = 10.01" == title
+        assert "d = [10.009]" == title
 
 
 class TestPlotStep(PlotTestCase):
@@ -1529,7 +1532,7 @@ class Common2dMixin:
         a.coords["d"] = "foo"
         self.plotfunc(a.isel(c=1))
         title = plt.gca().get_title()
-        assert "c = 1, d = foo" == title or "d = foo, c = 1" == title
+        assert title in {"c = 1, d = foo", "d = foo, c = 1"}
 
     def test_colorbar_default_label(self) -> None:
         self.plotmethod(add_colorbar=True)
@@ -1568,7 +1571,7 @@ class Common2dMixin:
         assert "MyLabel" in alltxt
         assert "testvar" not in alltxt
         # change cbar ax
-        fig, axs = plt.subplots(1, 2, squeeze=False)
+        _fig, axs = plt.subplots(1, 2, squeeze=False)
         ax = axs[0, 0]
         cax = axs[0, 1]
         self.plotmethod(
@@ -1580,7 +1583,7 @@ class Common2dMixin:
         assert "MyBar" in alltxt
         assert "testvar" not in alltxt
         # note that there are two ways to achieve this
-        fig, axs = plt.subplots(1, 2, squeeze=False)
+        _fig, axs = plt.subplots(1, 2, squeeze=False)
         ax = axs[0, 0]
         cax = axs[0, 1]
         self.plotmethod(
@@ -1773,6 +1776,18 @@ class TestContourf(Common2dMixin, PlotTe
         artist = self.plotmethod(levels=3)
         assert artist.extend == "neither"
 
+    def test_colormap_norm(self) -> None:
+        # Using a norm should plot a nice colorbar and look consistent with pcolormesh.
+        norm = mpl.colors.LogNorm(0.1, 1e1)
+
+        with pytest.warns(UserWarning):
+            artist = self.plotmethod(norm=norm, add_colorbar=True)
+
+        actual = artist.colorbar.locator()
+        expected = np.array([0.01, 0.1, 1.0, 10.0])
+
+        np.testing.assert_allclose(actual, expected)
+
 
 @pytest.mark.slow
 class TestContour(Common2dMixin, PlotTestCase):
@@ -1789,16 +1804,18 @@ class TestContour(Common2dMixin, PlotTes
         artist = self.plotmethod(colors="k")
         assert artist.cmap.colors[0] == "k"
 
+        # 2 colors, will repeat every other tick:
         artist = self.plotmethod(colors=["k", "b"])
-        assert self._color_as_tuple(artist.cmap.colors[1]) == (0.0, 0.0, 1.0)
+        assert artist.cmap.colors[:2] == ["k", "b"]
 
+        # 4 colors, will repeat every 4th tick:
         artist = self.darray.plot.contour(
             levels=[-0.5, 0.0, 0.5, 1.0], colors=["k", "r", "w", "b"]
         )
-        assert self._color_as_tuple(artist.cmap.colors[1]) == (1.0, 0.0, 0.0)
-        assert self._color_as_tuple(artist.cmap.colors[2]) == (1.0, 1.0, 1.0)
+        assert artist.cmap.colors[:5] == ["k", "r", "w", "b"]  # type: ignore[attr-defined,unused-ignore]
+
         # the last color is now under "over"
-        assert self._color_as_tuple(artist.cmap._rgba_over) == (0.0, 0.0, 1.0)
+        assert self._color_as_tuple(artist.cmap.get_over()) == (0.0, 0.0, 1.0)
 
     def test_colors_np_levels(self) -> None:
         # https://github.com/pydata/xarray/issues/3284
@@ -1806,15 +1823,11 @@ class TestContour(Common2dMixin, PlotTes
         artist = self.darray.plot.contour(levels=levels, colors=["k", "r", "w", "b"])
         cmap = artist.cmap
         assert isinstance(cmap, mpl.colors.ListedColormap)
-        # non-optimal typing in matplotlib (ArrayLike)
-        # https://github.com/matplotlib/matplotlib/blob/84464dd085210fb57cc2419f0d4c0235391d97e6/lib/matplotlib/colors.pyi#L133
-        colors = cast(np.ndarray, cmap.colors)
 
-        assert self._color_as_tuple(colors[1]) == (1.0, 0.0, 0.0)
-        assert self._color_as_tuple(colors[2]) == (1.0, 1.0, 1.0)
+        assert artist.cmap.colors[:5] == ["k", "r", "w", "b"]  # type: ignore[attr-defined,unused-ignore]
+
         # the last color is now under "over"
-        assert hasattr(cmap, "_rgba_over")
-        assert self._color_as_tuple(cmap._rgba_over) == (0.0, 0.0, 1.0)
+        assert self._color_as_tuple(cmap.get_over()) == (0.0, 0.0, 1.0)
 
     def test_cmap_and_color_both(self) -> None:
         with pytest.raises(ValueError):
@@ -1838,6 +1851,18 @@ class TestContour(Common2dMixin, PlotTes
         self.plotmethod(levels=[0.1])
         self.plotmethod(levels=1)
 
+    def test_colormap_norm(self) -> None:
+        # Using a norm should plot a nice colorbar and look consistent with pcolormesh.
+        norm = mpl.colors.LogNorm(0.1, 1e1)
+
+        with pytest.warns(UserWarning):
+            artist = self.plotmethod(norm=norm, add_colorbar=True)
+
+        actual = artist.colorbar.locator()
+        expected = np.array([0.01, 0.1, 1.0, 10.0])
+
+        np.testing.assert_allclose(actual, expected)
+
 
 class TestPcolormesh(Common2dMixin, PlotTestCase):
     plotfunc = staticmethod(xplt.pcolormesh)
@@ -2303,10 +2328,8 @@ class TestFacetGrid(PlotTestCase):
         numbers = set()
         alltxt = text_in_fig()
         for txt in alltxt:
-            try:
+            with contextlib.suppress(ValueError):
                 numbers.add(float(txt))
-            except ValueError:
-                pass
         largest = max(abs(x) for x in numbers)
         assert largest < 21
 
@@ -2702,9 +2725,9 @@ class TestDatasetStreamplotPlots(PlotTes
     def setUp(self) -> None:
         das = [
             DataArray(
-                np.random.randn(3, 3, 2, 2),
+                np.random.randn(3, 4, 2, 2),
                 dims=["x", "y", "row", "col"],
-                coords=[range(k) for k in [3, 3, 2, 2]],
+                coords=[range(k) for k in [3, 4, 2, 2]],
             )
             for _ in [1, 2]
         ]
@@ -2793,7 +2816,7 @@ class TestDatasetScatterPlots(PlotTestCa
     def test_add_guide(
         self,
         add_guide: bool | None,
-        hue_style: Literal["continuous", "discrete", None],
+        hue_style: Literal["continuous", "discrete"] | None,
         legend: bool,
         colorbar: bool,
     ) -> None:
@@ -2921,7 +2944,9 @@ class TestDatasetScatterPlots(PlotTestCa
         pc = ds2.plot.scatter(x="A", y="B", markersize="hue")
         axes = pc.axes
         assert axes is not None
-        actual = [t.get_text() for t in axes.get_legend().texts]
+        legend = axes.get_legend()
+        assert legend is not None
+        actual = [t.get_text() for t in legend.texts]
         expected = ["hue", "a", "b"]
         assert actual == expected
 
@@ -2966,7 +2991,7 @@ class TestDatetimePlot(PlotTestCase):
 
     def test_datetime_units(self) -> None:
         # test that matplotlib-native datetime works:
-        fig, ax = plt.subplots()
+        _fig, ax = plt.subplots()
         ax.plot(self.darray["time"], self.darray)
 
         # Make sure only mpl converters are used, use type() so only
@@ -3187,8 +3212,8 @@ def test_plot_transposes_properly(plotfu
 def test_facetgrid_single_contour() -> None:
     # regression test for GH3569
     x, y = np.meshgrid(np.arange(12), np.arange(12))
-    z = xr.DataArray(np.sqrt(x**2 + y**2))
-    z2 = xr.DataArray(np.sqrt(x**2 + y**2) + 1)
+    z = xr.DataArray(np.hypot(x, y))
+    z2 = xr.DataArray(np.hypot(x, y) + 1)
     ds = xr.concat([z, z2], dim="time")
     ds["time"] = [0, 1]
 
@@ -3431,7 +3456,7 @@ def test_plot1d_default_rcparams() -> No
     with figure_context():
         # scatter markers should by default have white edgecolor to better
         # see overlapping markers:
-        fig, ax = plt.subplots(1, 1)
+        _fig, ax = plt.subplots(1, 1)
         ds.plot.scatter(x="A", y="B", marker="o", ax=ax)
         actual: np.ndarray = mpl.colors.to_rgba_array("w")
         expected: np.ndarray = ax.collections[0].get_edgecolor()  # type: ignore[assignment]
@@ -3441,16 +3466,16 @@ def test_plot1d_default_rcparams() -> No
         fg = ds.plot.scatter(x="A", y="B", col="x", marker="o")
         ax = fg.axs.ravel()[0]
         actual = mpl.colors.to_rgba_array("w")
-        expected = ax.collections[0].get_edgecolor()  # type: ignore[assignment]
+        expected = ax.collections[0].get_edgecolor()  # type: ignore[assignment,unused-ignore]
         np.testing.assert_allclose(actual, expected)
 
         # scatter should not emit any warnings when using unfilled markers:
         with assert_no_warnings():
-            fig, ax = plt.subplots(1, 1)
+            _fig, ax = plt.subplots(1, 1)
             ds.plot.scatter(x="A", y="B", ax=ax, marker="x")
 
         # Prioritize edgecolor argument over default plot1d values:
-        fig, ax = plt.subplots(1, 1)
+        _fig, ax = plt.subplots(1, 1)
         ds.plot.scatter(x="A", y="B", marker="o", ax=ax, edgecolor="k")
         actual = mpl.colors.to_rgba_array("k")
         expected = ax.collections[0].get_edgecolor()  # type: ignore[assignment]
@@ -3476,7 +3501,7 @@ def test_9155() -> None:
 
     with figure_context():
         data = xr.DataArray([1, 2, 3], dims=["x"])
-        fig, ax = plt.subplots(ncols=1, nrows=1)
+        _fig, ax = plt.subplots(ncols=1, nrows=1)
         data.plot(ax=ax)  # type: ignore[call-arg]
 
 
diff -pruN 2025.03.1-8/xarray/tests/test_plugins.py 2025.10.1-1/xarray/tests/test_plugins.py
--- 2025.03.1-8/xarray/tests/test_plugins.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_plugins.py	2025-10-10 10:38:05.000000000 +0000
@@ -2,11 +2,13 @@ from __future__ import annotations
 
 import sys
 from importlib.metadata import EntryPoint, EntryPoints
+from itertools import starmap
 from unittest import mock
 
 import pytest
 
 from xarray.backends import common, plugins
+from xarray.core.options import OPTIONS
 from xarray.tests import (
     has_h5netcdf,
     has_netCDF4,
@@ -48,7 +50,7 @@ def dummy_duplicated_entrypoints():
         ["engine2", "xarray.tests.test_plugins:backend_1", "xarray.backends"],
         ["engine2", "xarray.tests.test_plugins:backend_2", "xarray.backends"],
     ]
-    eps = [EntryPoint(name, value, group) for name, value, group in specs]
+    eps = list(starmap(EntryPoint, specs))
     return eps
 
 
@@ -91,7 +93,7 @@ def test_backends_dict_from_pkg() -> Non
         ["engine1", "xarray.tests.test_plugins:backend_1", "xarray.backends"],
         ["engine2", "xarray.tests.test_plugins:backend_2", "xarray.backends"],
     ]
-    entrypoints = [EntryPoint(name, value, group) for name, value, group in specs]
+    entrypoints = list(starmap(EntryPoint, specs))
     engines = plugins.backends_dict_from_pkg(entrypoints)
     assert len(engines) == 2
     assert engines.keys() == {"engine1", "engine2"}
@@ -170,7 +172,7 @@ def test_build_engines_sorted() -> None:
     backend_entrypoints = list(plugins.build_engines(dummy_pkg_entrypoints))
 
     indices = []
-    for be in plugins.STANDARD_BACKENDS_ORDER:
+    for be in OPTIONS["netcdf_engine_order"]:
         try:
             index = backend_entrypoints.index(be)
             backend_entrypoints.pop(index)
@@ -226,7 +228,7 @@ def test_lazy_import() -> None:
         "numbagg",
         "pint",
         "pydap",
-        "scipy",
+        # "scipy",  # TODO: xarray.backends.scipy_ is currently not lazy
         "sparse",
         "zarr",
     ]
diff -pruN 2025.03.1-8/xarray/tests/test_range_index.py 2025.10.1-1/xarray/tests/test_range_index.py
--- 2025.03.1-8/xarray/tests/test_range_index.py	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_range_index.py	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,303 @@
+import numpy as np
+import pandas as pd
+import pytest
+
+import xarray as xr
+from xarray.indexes import PandasIndex, RangeIndex
+from xarray.tests import assert_allclose, assert_equal, assert_identical
+
+
+def create_dataset_arange(
+    start: float, stop: float, step: float, dim: str = "x"
+) -> xr.Dataset:
+    index = RangeIndex.arange(start, stop, step, dim=dim)
+    return xr.Dataset(coords=xr.Coordinates.from_xindex(index))
+
+
+@pytest.mark.parametrize(
+    "args,kwargs",
+    [
+        ((10.0,), {}),
+        ((), {"stop": 10.0}),
+        (
+            (
+                2.0,
+                10.0,
+            ),
+            {},
+        ),
+        ((2.0,), {"stop": 10.0}),
+        ((), {"start": 2.0, "stop": 10.0}),
+        ((2.0, 10.0, 2.0), {}),
+        ((), {"start": 2.0, "stop": 10.0, "step": 2.0}),
+    ],
+)
+def test_range_index_arange(args, kwargs) -> None:
+    index = RangeIndex.arange(*args, **kwargs, dim="x")
+    actual = xr.Coordinates.from_xindex(index)
+    expected = xr.Coordinates({"x": np.arange(*args, **kwargs)})
+    assert_equal(actual, expected, check_default_indexes=False)
+
+
+def test_range_index_arange_error() -> None:
+    with pytest.raises(TypeError, match=r".*requires stop to be specified"):
+        RangeIndex.arange(dim="x")
+
+
+def test_range_index_arange_start_as_stop() -> None:
+    # Weird (and probably very unlikely) case where only `start` is given, as a
+    # keyword argument, and is then interpreted as `stop`.
+    # This has been fixed in numpy (https://github.com/numpy/numpy/pull/17878)
+    # using the Python C API. In pure Python it is trickier, as there is no easy
+    # way to know whether a value was passed positionally or by keyword.
+    # Note: `pandas.RangeIndex` constructor still has this weird behavior.
+    index = RangeIndex.arange(start=10.0, dim="x")
+    actual = xr.Coordinates.from_xindex(index)
+    expected = xr.Coordinates({"x": np.arange(10.0)})
+    assert_equal(actual, expected, check_default_indexes=False)
+
+
+def test_range_index_arange_properties() -> None:
+    index = RangeIndex.arange(0.0, 1.0, 0.1, dim="x")
+    assert index.start == 0.0
+    assert index.stop == 1.0
+    assert index.step == 0.1
+
+
+def test_range_index_linspace() -> None:
+    index = RangeIndex.linspace(0.0, 1.0, num=10, endpoint=False, dim="x")
+    actual = xr.Coordinates.from_xindex(index)
+    expected = xr.Coordinates({"x": np.linspace(0.0, 1.0, num=10, endpoint=False)})
+    assert_equal(actual, expected, check_default_indexes=False)
+    assert index.start == 0.0
+    assert index.stop == 1.0
+    assert index.step == 0.1
+
+    index = RangeIndex.linspace(0.0, 1.0, num=11, endpoint=True, dim="x")
+    actual = xr.Coordinates.from_xindex(index)
+    expected = xr.Coordinates({"x": np.linspace(0.0, 1.0, num=11, endpoint=True)})
+    assert_allclose(actual, expected, check_default_indexes=False)
+    assert index.start == 0.0
+    assert index.stop == 1.1
+    assert index.step == 0.1
+
+
+def test_range_index_dtype() -> None:
+    index = RangeIndex.arange(0.0, 1.0, 0.1, dim="x", dtype=np.float32)
+    coords = xr.Coordinates.from_xindex(index)
+    assert coords["x"].dtype == np.dtype(np.float32)
+
+
+def test_range_index_set_xindex() -> None:
+    coords = xr.Coordinates({"x": np.arange(0.0, 1.0, 0.1)}, indexes={})
+    ds = xr.Dataset(coords=coords)
+
+    with pytest.raises(
+        NotImplementedError, match=r"cannot create.*RangeIndex.*existing coordinate"
+    ):
+        ds.set_xindex("x", RangeIndex)
+
+
+def test_range_index_isel() -> None:
+    ds = create_dataset_arange(0.0, 1.0, 0.1)
+
+    # slicing
+    actual = ds.isel(x=slice(None))
+    assert_identical(actual, ds, check_default_indexes=False)
+
+    actual = ds.isel(x=slice(1, None))
+    expected = create_dataset_arange(0.1, 1.0, 0.1)
+    assert_identical(actual, expected, check_default_indexes=False)
+
+    actual = ds.isel(x=slice(None, 2))
+    expected = create_dataset_arange(0.0, 0.2, 0.1)
+    assert_identical(actual, expected, check_default_indexes=False)
+
+    actual = ds.isel(x=slice(1, 3))
+    expected = create_dataset_arange(0.1, 0.3, 0.1)
+    assert_identical(actual, expected, check_default_indexes=False)
+
+    actual = ds.isel(x=slice(None, None, 2))
+    expected = create_dataset_arange(0.0, 1.0, 0.2)
+    assert_identical(actual, expected, check_default_indexes=False)
+
+    actual = ds.isel(x=slice(None, None, -1))
+    expected = create_dataset_arange(0.9, -0.1, -0.1)
+    assert_identical(actual, expected, check_default_indexes=False)
+
+    actual = ds.isel(x=slice(None, 4, -1))
+    expected = create_dataset_arange(0.9, 0.4, -0.1)
+    assert_identical(actual, expected, check_default_indexes=False)
+
+    actual = ds.isel(x=slice(8, 4, -1))
+    expected = create_dataset_arange(0.8, 0.4, -0.1)
+    assert_identical(actual, expected, check_default_indexes=False)
+
+    actual = ds.isel(x=slice(8, None, -1))
+    expected = create_dataset_arange(0.8, -0.1, -0.1)
+    assert_identical(actual, expected, check_default_indexes=False)
+
+    # https://github.com/pydata/xarray/issues/10441
+    ds2 = create_dataset_arange(0.0, 3.0, 0.1)
+    actual = ds2.isel(x=slice(4, None, 3))
+    expected = create_dataset_arange(0.4, 3.0, 0.3)
+    assert_identical(actual, expected, check_default_indexes=False)
+
+    # scalar
+    actual = ds.isel(x=0)
+    expected = xr.Dataset(coords={"x": 0.0})
+    assert_identical(actual, expected)
+
+    # outer indexing with arbitrary array values
+    actual = ds.isel(x=[0, 2])
+    expected = xr.Dataset(coords={"x": [0.0, 0.2]})
+    assert_identical(actual, expected)
+    assert isinstance(actual.xindexes["x"], PandasIndex)
+
+    # fancy indexing with 1-d Variable
+    actual = ds.isel(x=xr.Variable("y", [0, 2]))
+    expected = xr.Dataset(coords={"x": ("y", [0.0, 0.2])}).set_xindex("x")
+    assert_identical(actual, expected, check_default_indexes=False)
+    assert isinstance(actual.xindexes["x"], PandasIndex)
+
+    # fancy indexing with n-d Variable
+    actual = ds.isel(x=xr.Variable(("u", "v"), [[0, 0], [2, 2]]))
+    expected = xr.Dataset(coords={"x": (("u", "v"), [[0.0, 0.0], [0.2, 0.2]])})
+    assert_identical(actual, expected)
+
+
+def test_range_index_empty_slice() -> None:
+    """Test that empty slices of RangeIndex are printable and preserve step.
+
+    Regression test for https://github.com/pydata/xarray/issues/10547
+    """
+    # Test with linspace
+    n = 30
+    step = 1
+    da = xr.DataArray(np.zeros(n), dims=["x"])
+    da = da.assign_coords(
+        xr.Coordinates.from_xindex(RangeIndex.linspace(0, (n - 1) * step, n, dim="x"))
+    )
+
+    # This should not raise ZeroDivisionError
+    sub = da.isel(x=slice(0))
+    assert sub.sizes["x"] == 0
+
+    # Test that it's printable
+    repr_str = repr(sub)
+    assert "RangeIndex" in repr_str
+    assert "step=1" in repr_str
+
+    # Test with different step values
+    index = RangeIndex.arange(0, 10, 2.5, dim="y")
+    da2 = xr.DataArray(np.zeros(4), dims=["y"])
+    da2 = da2.assign_coords(xr.Coordinates.from_xindex(index))
+    empty = da2.isel(y=slice(0))
+
+    # Should preserve step
+    assert empty.sizes["y"] == 0
+    range_index_y = empty._indexes["y"]
+    assert isinstance(range_index_y, RangeIndex)
+    assert range_index_y.step == 2.5
+
+    # Test that it's printable
+    repr_str2 = repr(empty)
+    assert "RangeIndex" in repr_str2
+    assert "step=2.5" in repr_str2
+
+    # Test negative step
+    index3 = RangeIndex.arange(10, 0, -1, dim="z")
+    da3 = xr.DataArray(np.zeros(10), dims=["z"])
+    da3 = da3.assign_coords(xr.Coordinates.from_xindex(index3))
+    empty3 = da3.isel(z=slice(0))
+
+    assert empty3.sizes["z"] == 0
+    range_index_z = empty3._indexes["z"]
+    assert isinstance(range_index_z, RangeIndex)
+    assert range_index_z.step == -1.0
+
+    # Test that it's printable
+    repr_str3 = repr(empty3)
+    assert "RangeIndex" in repr_str3
+    assert "step=-1" in repr_str3
+
+
+def test_range_index_sel() -> None:
+    ds = create_dataset_arange(0.0, 1.0, 0.1)
+
+    # start-stop slice
+    actual = ds.sel(x=slice(0.12, 0.28), method="nearest")
+    expected = create_dataset_arange(0.1, 0.3, 0.1)
+    assert_identical(actual, expected, check_default_indexes=False)
+
+    # start-stop-step slice
+    actual = ds.sel(x=slice(0.0, 1.0, 0.2), method="nearest")
+    expected = ds.isel(x=range(0, 10, 2))
+    assert_identical(actual, expected, check_default_indexes=False)
+
+    # basic indexing
+    actual = ds.sel(x=0.52, method="nearest")
+    expected = xr.Dataset(coords={"x": 0.5})
+    assert_allclose(actual, expected)
+
+    actual = ds.sel(x=0.58, method="nearest")
+    expected = xr.Dataset(coords={"x": 0.6})
+    assert_allclose(actual, expected)
+
+    # 1-d array indexing
+    actual = ds.sel(x=[0.52, 0.58], method="nearest")
+    expected = xr.Dataset(coords={"x": [0.5, 0.6]})
+    assert_allclose(actual, expected)
+
+    actual = ds.sel(x=xr.Variable("y", [0.52, 0.58]), method="nearest")
+    expected = xr.Dataset(coords={"x": ("y", [0.5, 0.6])}).set_xindex("x")
+    assert_allclose(actual, expected, check_default_indexes=False)
+
+    actual = ds.sel(x=xr.DataArray([0.52, 0.58], dims="y"), method="nearest")
+    expected = xr.Dataset(coords={"x": ("y", [0.5, 0.6])}).set_xindex("x")
+    assert_allclose(actual, expected, check_default_indexes=False)
+
+    with pytest.raises(ValueError, match=r"RangeIndex only supports.*method.*nearest"):
+        ds.sel(x=0.1)
+
+    with pytest.raises(ValueError, match=r"RangeIndex doesn't support.*tolerance"):
+        ds.sel(x=0.1, method="nearest", tolerance=1e-3)
+
+
+def test_range_index_to_pandas_index() -> None:
+    ds = create_dataset_arange(0.0, 1.0, 0.1)
+
+    actual = ds.indexes["x"]
+    expected = pd.Index(np.arange(0.0, 1.0, 0.1))
+    assert actual.equals(expected)
+
+
+def test_range_index_rename() -> None:
+    index = RangeIndex.arange(0.0, 1.0, 0.1, dim="x")
+    ds = xr.Dataset(coords=xr.Coordinates.from_xindex(index))
+
+    actual = ds.rename_vars(x="y")
+    idx = RangeIndex.arange(0.0, 1.0, 0.1, coord_name="y", dim="x")
+    expected = xr.Dataset(coords=xr.Coordinates.from_xindex(idx))
+    assert_identical(actual, expected, check_default_indexes=False)
+
+    actual = ds.rename_dims(x="y")
+    idx = RangeIndex.arange(0.0, 1.0, 0.1, coord_name="x", dim="y")
+    expected = xr.Dataset(coords=xr.Coordinates.from_xindex(idx))
+    assert_identical(actual, expected, check_default_indexes=False)
+
+
+def test_range_index_repr() -> None:
+    index = RangeIndex.arange(0.0, 1.0, 0.1, dim="x")
+    actual = repr(index)
+    expected = (
+        "RangeIndex (start=0, stop=1, step=0.1, size=10, coord_name='x', dim='x')"
+    )
+    assert actual == expected
+
+
+def test_range_index_repr_inline() -> None:
+    index = RangeIndex.arange(0.0, 1.0, 0.1, dim="x")
+    actual = index._repr_inline_(max_width=70)
+    expected = "RangeIndex (start=0, stop=1, step=0.1)"
+    assert actual == expected
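A condensed, standalone sketch of the RangeIndex behaviour exercised by the tests above; the public import path xarray.indexes.RangeIndex is assumed here, and the inline results are approximate.

import xarray as xr
from xarray.indexes import RangeIndex

# A functional index: coordinate values are derived from (start, stop, step)
# instead of being stored in memory.
index = RangeIndex.arange(0.0, 1.0, 0.1, dim="x")
ds = xr.Dataset(coords=xr.Coordinates.from_xindex(index))

# Slicing updates start/stop/step analytically and keeps the RangeIndex.
print(ds.isel(x=slice(None, None, 2)).xindexes["x"])

# Point-wise (outer) indexing materializes the values into a PandasIndex.
print(type(ds.isel(x=[0, 2]).xindexes["x"]).__name__)  # PandasIndex

# Label-based selection requires method="nearest".
print(ds.sel(x=0.52, method="nearest")["x"].values)  # nearest value, ~0.5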
diff -pruN 2025.03.1-8/xarray/tests/test_rolling.py 2025.10.1-1/xarray/tests/test_rolling.py
--- 2025.03.1-8/xarray/tests/test_rolling.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_rolling.py	2025-10-10 10:38:05.000000000 +0000
@@ -341,7 +341,7 @@ class TestDataArrayRolling:
         assert_allclose(actual, expected)
         assert actual.sizes == expected.sizes
 
-        if name in ["mean"]:
+        if name == "mean":
             # test our reimplementation of nanmean using np.nanmean
             expected = getattr(rolling_obj.construct({"time": "tw", "x": "xw"}), name)(
                 ["tw", "xw"]
@@ -433,6 +433,20 @@ class TestDataArrayRolling:
         chunked_result = data.chunk({"x": 1}).rolling(x=3, min_periods=1).mean()
         assert chunked_result.dtype == unchunked_result.dtype
 
+    def test_rolling_mean_bool(self) -> None:
+        bool_raster = DataArray(
+            data=[0, 1, 1, 0, 1, 0],
+            dims=("x"),
+        ).astype(bool)
+
+        expected = DataArray(
+            data=[np.nan, 2 / 3, 2 / 3, 2 / 3, 1 / 3, np.nan],
+            dims=("x"),
+        )
+
+        result = bool_raster.rolling(x=3, center=True).mean()
+        assert_allclose(result, expected)
+
 
 @requires_numbagg
 class TestDataArrayRollingExp:
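A minimal sketch of the boolean rolling-mean behaviour covered by test_rolling_mean_bool above: the boolean values are treated as 0/1, a centered window of three yields fractional means, and the window is incomplete at the edges.

import xarray as xr

bool_raster = xr.DataArray([0, 1, 1, 0, 1, 0], dims="x").astype(bool)
result = bool_raster.rolling(x=3, center=True).mean()
print(result.values)  # approximately [nan, 0.667, 0.667, 0.667, 0.333, nan]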
diff -pruN 2025.03.1-8/xarray/tests/test_sparse.py 2025.10.1-1/xarray/tests/test_sparse.py
--- 2025.03.1-8/xarray/tests/test_sparse.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_sparse.py	2025-10-10 10:38:05.000000000 +0000
@@ -7,6 +7,7 @@ from textwrap import dedent
 import numpy as np
 import pandas as pd
 import pytest
+from packaging.version import Version
 
 import xarray as xr
 import xarray.ufuncs as xu
@@ -235,15 +236,14 @@ def test_variable_method(func, sparse_ou
     if sparse_output:
         assert isinstance(ret_s.data, sparse.SparseArray)
         assert np.allclose(ret_s.data.todense(), ret_d.data, equal_nan=True)
+    elif func.meth != "to_dict":
+        assert np.allclose(ret_s, ret_d)
     else:
-        if func.meth != "to_dict":
-            assert np.allclose(ret_s, ret_d)
-        else:
-            # pop the arrays from the dict
-            arr_s, arr_d = ret_s.pop("data"), ret_d.pop("data")
+        # pop the arrays from the dict
+        arr_s, arr_d = ret_s.pop("data"), ret_d.pop("data")
 
-            assert np.allclose(arr_s, arr_d)
-            assert ret_s == ret_d
+        assert np.allclose(arr_s, arr_d)
+        assert ret_s == ret_d
 
 
 @pytest.mark.parametrize(
@@ -661,16 +661,19 @@ class TestSparseDataArrayAndDataset:
             sparse.concatenate([self.sp_ar, self.sp_ar, self.sp_ar], axis=0),
         )
 
-        out = xr.concat([self.sp_xr, self.sp_xr, self.sp_xr], dim="y")
+        out_concat = xr.concat([self.sp_xr, self.sp_xr, self.sp_xr], dim="y")
         assert_sparse_equal(
-            out.data, sparse.concatenate([self.sp_ar, self.sp_ar, self.sp_ar], axis=1)
+            out_concat.data,
+            sparse.concatenate([self.sp_ar, self.sp_ar, self.sp_ar], axis=1),
         )
 
     def test_stack(self):
         arr = make_xrarray({"w": 2, "x": 3, "y": 4})
         stacked = arr.stack(z=("x", "y"))
 
-        z = pd.MultiIndex.from_product([np.arange(3), np.arange(4)], names=["x", "y"])
+        z = pd.MultiIndex.from_product(
+            [list(range(3)), list(range(4))], names=["x", "y"]
+        )
 
         expected = xr.DataArray(
             arr.data.reshape((2, -1)), {"w": [0, 1], "z": z}, dims=["w", "z"]
@@ -719,13 +722,17 @@ class TestSparseDataArrayAndDataset:
         ds = xr.Dataset(
             data_vars={"a": ("x", sparse.COO.from_numpy(np.ones(4)))}
         ).chunk()
+        if Version(sparse.__version__) >= Version("0.16.0"):
+            meta = "sparse.numba_backend._coo.core.COO"
+        else:
+            meta = "sparse.COO"
         expected = dedent(
-            """\
+            f"""\
             <xarray.Dataset> Size: 32B
             Dimensions:  (x: 4)
             Dimensions without coordinates: x
             Data variables:
-                a        (x) float64 32B dask.array<chunksize=(4,), meta=sparse.COO>"""
+                a        (x) float64 32B dask.array<chunksize=(4,), meta={meta}>"""
         )
         assert expected == repr(ds)
 
@@ -749,8 +756,8 @@ class TestSparseDataArrayAndDataset:
     def test_coarsen(self):
         a1 = self.ds_xr
         a2 = self.sp_xr
-        m1 = a1.coarsen(x=2, boundary="trim").mean()
-        m2 = a2.coarsen(x=2, boundary="trim").mean()
+        m1 = a1.coarsen(x=2, boundary="trim").mean()  # type: ignore[attr-defined]
+        m2 = a2.coarsen(x=2, boundary="trim").mean()  # type: ignore[attr-defined]
 
         assert isinstance(m2.data, sparse.SparseArray)
         assert np.allclose(m1.data, m2.data.todense())
@@ -777,7 +784,7 @@ class TestSparseDataArrayAndDataset:
 
     @pytest.mark.xfail(reason="No implementation of np.einsum")
     def test_dot(self):
-        a1 = self.xp_xr.dot(self.xp_xr[0])
+        a1 = self.sp_xr.dot(self.sp_xr[0])
         a2 = self.sp_ar.dot(self.sp_ar[0])
         assert_equal(a1, a2)
 
@@ -831,8 +838,8 @@ class TestSparseDataArrayAndDataset:
             {"x": [1, 100, 2, 101, 3]},
             {"x": [2.5, 3, 3.5], "y": [2, 2.5, 3]},
         ]:
-            m1 = x1.reindex(**kwargs)
-            m2 = x2.reindex(**kwargs)
+            m1 = x1.reindex(**kwargs)  # type: ignore[arg-type]
+            m2 = x2.reindex(**kwargs)  # type: ignore[arg-type]
             assert np.allclose(m1, m2, equal_nan=True)
 
     @pytest.mark.xfail
@@ -848,12 +855,12 @@ class TestSparseDataArrayAndDataset:
         xr.DataArray(a).where(cond)
 
         s = sparse.COO.from_numpy(a)
-        cond = s > 3
-        xr.DataArray(s).where(cond)
+        cond2 = s > 3
+        xr.DataArray(s).where(cond2)
 
         x = xr.DataArray(s)
-        cond = x > 3
-        x.where(cond)
+        cond3: DataArray = x > 3
+        x.where(cond3)
 
 
 class TestSparseCoords:
diff -pruN 2025.03.1-8/xarray/tests/test_treenode.py 2025.10.1-1/xarray/tests/test_treenode.py
--- 2025.03.1-8/xarray/tests/test_treenode.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_treenode.py	2025-10-10 10:38:05.000000000 +0000
@@ -100,7 +100,7 @@ class TestFamilyTree:
         john: TreeNode = TreeNode()
 
         with pytest.raises(TypeError):
-            john.children = {"Kate": 666}
+            john.children = {"Kate": 666}  # type: ignore[dict-item]
 
         with pytest.raises(InvalidTreeError, match="Cannot add same node"):
             john.children = {"Kate": kate, "Evil_Kate": kate}
diff -pruN 2025.03.1-8/xarray/tests/test_tutorial.py 2025.10.1-1/xarray/tests/test_tutorial.py
--- 2025.03.1-8/xarray/tests/test_tutorial.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_tutorial.py	2025-10-10 10:38:05.000000000 +0000
@@ -9,19 +9,14 @@ from xarray.tests import network
 class TestLoadDataset:
     def test_download_from_github(self, tmp_path) -> None:
         cache_dir = tmp_path / tutorial._default_cache_dir_name
-        ds = tutorial.open_dataset("tiny", cache_dir=cache_dir).load()
+        ds = tutorial.load_dataset("tiny", cache_dir=cache_dir)
         tiny = DataArray(range(5), name="tiny").to_dataset()
         assert_identical(ds, tiny)
 
-    def test_download_from_github_load_without_cache(
-        self, tmp_path, monkeypatch
-    ) -> None:
+    def test_download_from_github_load_without_cache(self, tmp_path) -> None:
         cache_dir = tmp_path / tutorial._default_cache_dir_name
-
-        ds_nocache = tutorial.open_dataset(
-            "tiny", cache=False, cache_dir=cache_dir
-        ).load()
-        ds_cache = tutorial.open_dataset("tiny", cache_dir=cache_dir).load()
+        ds_nocache = tutorial.load_dataset("tiny", cache=False, cache_dir=cache_dir)
+        ds_cache = tutorial.load_dataset("tiny", cache_dir=cache_dir)
         assert_identical(ds_cache, ds_nocache)
 
 
@@ -29,17 +24,12 @@ class TestLoadDataset:
 class TestLoadDataTree:
     def test_download_from_github(self, tmp_path) -> None:
         cache_dir = tmp_path / tutorial._default_cache_dir_name
-        ds = tutorial.open_datatree("tiny", cache_dir=cache_dir).load()
+        ds = tutorial.load_datatree("tiny", cache_dir=cache_dir)
         tiny = DataTree.from_dict({"/": DataArray(range(5), name="tiny").to_dataset()})
         assert_identical(ds, tiny)
 
-    def test_download_from_github_load_without_cache(
-        self, tmp_path, monkeypatch
-    ) -> None:
+    def test_download_from_github_load_without_cache(self, tmp_path) -> None:
         cache_dir = tmp_path / tutorial._default_cache_dir_name
-
-        ds_nocache = tutorial.open_datatree(
-            "tiny", cache=False, cache_dir=cache_dir
-        ).load()
-        ds_cache = tutorial.open_datatree("tiny", cache_dir=cache_dir).load()
+        ds_nocache = tutorial.load_datatree("tiny", cache=False, cache_dir=cache_dir)
+        ds_cache = tutorial.load_datatree("tiny", cache_dir=cache_dir)
         assert_identical(ds_cache, ds_nocache)
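The tutorial tests now call the eager load_* helpers instead of open_*(...).load(); a minimal sketch of the equivalence they rely on (network access and a writable cache directory are assumed).

import xarray as xr

# Eager: download (or read from cache), load into memory, close the file.
ds = xr.tutorial.load_dataset("tiny")

# Roughly the older pattern used before this change:
ds_old_style = xr.tutorial.open_dataset("tiny").load()

xr.testing.assert_identical(ds, ds_old_style)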
diff -pruN 2025.03.1-8/xarray/tests/test_ufuncs.py 2025.10.1-1/xarray/tests/test_ufuncs.py
--- 2025.03.1-8/xarray/tests/test_ufuncs.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_ufuncs.py	2025-10-10 10:38:05.000000000 +0000
@@ -4,6 +4,7 @@ import pickle
 from unittest.mock import patch
 
 import numpy as np
+import numpy.typing as npt
 import pytest
 
 import xarray as xr
@@ -33,7 +34,7 @@ def test_unary(a):
 
 
 def test_binary():
-    args = [
+    args: list[int | float | npt.NDArray | xr.Variable | xr.DataArray | xr.Dataset] = [
         0,
         np.zeros(2),
         xr.Variable(["x"], [0, 0]),
@@ -49,7 +50,7 @@ def test_binary():
 
 
 def test_binary_out():
-    args = [
+    args: list[int | float | npt.NDArray | xr.Variable | xr.DataArray | xr.Dataset] = [
         1,
         np.ones(2),
         xr.Variable(["x"], [1, 1]),
@@ -62,26 +63,39 @@ def test_binary_out():
         assert_identical(actual_exponent, arg)
 
 
+def test_binary_coord_attrs():
+    t = xr.Variable("t", np.arange(2, 4), attrs={"units": "s"})
+    x = xr.DataArray(t.values**2, coords={"t": t}, attrs={"units": "s^2"})
+    y = xr.DataArray(t.values**3, coords={"t": t}, attrs={"units": "s^3"})
+    z1 = xr.apply_ufunc(np.add, x, y, keep_attrs=True)
+    assert z1.coords["t"].attrs == {"units": "s"}
+    z2 = xr.apply_ufunc(np.add, x, y, keep_attrs=False)
+    assert z2.coords["t"].attrs == {}
+    # Check also that input array's coordinate attributes weren't affected
+    assert t.attrs == {"units": "s"}
+    assert x.coords["t"].attrs == {"units": "s"}
+
+
 def test_groupby():
     ds = xr.Dataset({"a": ("x", [0, 0, 0])}, {"c": ("x", [0, 0, 1])})
     ds_grouped = ds.groupby("c")
     group_mean = ds_grouped.mean("x")
     arr_grouped = ds["a"].groupby("c")
 
-    assert_identical(ds, np.maximum(ds_grouped, group_mean))
-    assert_identical(ds, np.maximum(group_mean, ds_grouped))
+    assert_identical(ds, np.maximum(ds_grouped, group_mean))  # type: ignore[call-overload]
+    assert_identical(ds, np.maximum(group_mean, ds_grouped))  # type: ignore[call-overload]
 
-    assert_identical(ds, np.maximum(arr_grouped, group_mean))
-    assert_identical(ds, np.maximum(group_mean, arr_grouped))
+    assert_identical(ds, np.maximum(arr_grouped, group_mean))  # type: ignore[call-overload]
+    assert_identical(ds, np.maximum(group_mean, arr_grouped))  # type: ignore[call-overload]
 
-    assert_identical(ds, np.maximum(ds_grouped, group_mean["a"]))
-    assert_identical(ds, np.maximum(group_mean["a"], ds_grouped))
+    assert_identical(ds, np.maximum(ds_grouped, group_mean["a"]))  # type: ignore[call-overload]
+    assert_identical(ds, np.maximum(group_mean["a"], ds_grouped))  # type: ignore[call-overload]
 
-    assert_identical(ds.a, np.maximum(arr_grouped, group_mean.a))
-    assert_identical(ds.a, np.maximum(group_mean.a, arr_grouped))
+    assert_identical(ds.a, np.maximum(arr_grouped, group_mean.a))  # type: ignore[call-overload]
+    assert_identical(ds.a, np.maximum(group_mean.a, arr_grouped))  # type: ignore[call-overload]
 
     with pytest.raises(ValueError, match=r"mismatched lengths for dimension"):
-        np.maximum(ds.a.variable, ds_grouped)
+        np.maximum(ds.a.variable, ds_grouped)  # type: ignore[call-overload]
 
 
 def test_alignment():
@@ -113,8 +127,8 @@ def test_xarray_defers_to_unrecognized_t
 
     xarray_obj = xr.DataArray([1, 2, 3])
     other = Other()
-    assert np.maximum(xarray_obj, other) == "other"
-    assert np.sin(xarray_obj, out=other) == "other"
+    assert np.maximum(xarray_obj, other) == "other"  # type: ignore[call-overload]
+    assert np.sin(xarray_obj, out=other) == "other"  # type: ignore[call-overload]
 
 
 def test_xarray_handles_dask():
@@ -146,7 +160,7 @@ def test_out():
 
     # xarray out arguments should raise
     with pytest.raises(NotImplementedError, match=r"`out` argument"):
-        np.add(xarray_obj, 1, out=xarray_obj)
+        np.add(xarray_obj, 1, out=xarray_obj)  # type: ignore[call-overload]
 
     # but non-xarray should be OK
     other = np.zeros((3,))
@@ -168,7 +182,7 @@ class DuckArray(np.ndarray):
         obj = np.asarray(array).view(cls)
         return obj
 
-    def __array_namespace__(self):
+    def __array_namespace__(self, *, api_version=None):
         return DuckArray
 
     @staticmethod
@@ -181,7 +195,7 @@ class DuckArray(np.ndarray):
 
 
 class DuckArray2(DuckArray):
-    def __array_namespace__(self):
+    def __array_namespace__(self, *, api_version=None):
         return DuckArray2
 
 
@@ -203,12 +217,12 @@ class TestXarrayUfuncs:
 
         if name == "isnat":
             args = (self.xt,)
-        elif hasattr(np_func, "nin") and np_func.nin == 2:
-            args = (self.x, self.x)
+        elif hasattr(np_func, "nin") and np_func.nin == 2:  # type: ignore[union-attr]
+            args = (self.x, self.x)  # type: ignore[assignment]
         else:
             args = (self.x,)
 
-        expected = np_func(*args)
+        expected = np_func(*args)  # type: ignore[misc]
         actual = xu_func(*args)
 
         if name in ["angle", "iscomplex"]:
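A condensed sketch of the coordinate-attribute behaviour checked by test_binary_coord_attrs above: keep_attrs decides whether coordinate attrs survive xr.apply_ufunc, and the inputs are not modified either way.

import numpy as np
import xarray as xr

t = xr.Variable("t", np.arange(2, 4), attrs={"units": "s"})
x = xr.DataArray(t.values**2, coords={"t": t}, attrs={"units": "s^2"})
y = xr.DataArray(t.values**3, coords={"t": t}, attrs={"units": "s^3"})

print(xr.apply_ufunc(np.add, x, y, keep_attrs=True).coords["t"].attrs)   # {'units': 's'}
print(xr.apply_ufunc(np.add, x, y, keep_attrs=False).coords["t"].attrs)  # {}
print(x.coords["t"].attrs)  # input left untouched: {'units': 's'}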
diff -pruN 2025.03.1-8/xarray/tests/test_units.py 2025.10.1-1/xarray/tests/test_units.py
--- 2025.03.1-8/xarray/tests/test_units.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_units.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,7 +1,9 @@
 from __future__ import annotations
 
+import contextlib
 import functools
 import operator
+from typing import Any
 
 import numpy as np
 import pytest
@@ -20,16 +22,26 @@ from xarray.tests import (
 from xarray.tests.test_plot import PlotTestCase
 from xarray.tests.test_variable import _PAD_XR_NP_ARGS
 
-try:
+with contextlib.suppress(ImportError):
     import matplotlib.pyplot as plt
-except ImportError:
-    pass
 
 
 pint = pytest.importorskip("pint")
 DimensionalityError = pint.errors.DimensionalityError
 
 
+def create_nan_array(values, dtype):
+    """Create array with NaN values, handling cast warnings for int dtypes."""
+    import warnings
+
+    # NumPy emits "invalid value encountered in cast" when a float array
+    # containing NaN is cast to an integer dtype; that warning is expected here.
+    with warnings.catch_warnings():
+        if np.issubdtype(dtype, np.integer):
+            warnings.filterwarnings("ignore", "invalid value encountered in cast")
+        return np.array(values).astype(dtype)
+
+
 # make sure scalars are converted to 0d arrays so quantities can
 # always be treated like ndarrays
 unit_registry = pint.UnitRegistry(force_ndarray_like=True)
@@ -155,12 +167,12 @@ def strip_units(obj):
             for name, value in obj.coords.items()
         }
 
-        new_obj = xr.DataArray(
+        new_obj = xr.DataArray(  # type: ignore[assignment]
             name=strip_units(obj.name), data=data, coords=coords, dims=obj.dims
         )
     elif isinstance(obj, xr.Variable):
         data = array_strip_units(obj.data)
-        new_obj = obj.copy(data=data)
+        new_obj = obj.copy(data=data)  # type: ignore[assignment]
     elif isinstance(obj, unit_registry.Quantity):
         new_obj = obj.magnitude
     elif isinstance(obj, list | tuple):
@@ -203,14 +215,14 @@ def attach_units(obj, units):
         dims = obj.dims
         attrs = obj.attrs
 
-        new_obj = xr.DataArray(
+        new_obj = xr.DataArray(  # type: ignore[assignment]
             name=obj.name, data=data, coords=coords, attrs=attrs, dims=dims
         )
     else:
         data_units = units.get("data", None) or units.get(None, None) or 1
 
         data = array_attach_units(obj.data, data_units)
-        new_obj = obj.copy(data=data)
+        new_obj = obj.copy(data=data)  # type: ignore[assignment]
 
     return new_obj
 
@@ -235,9 +247,7 @@ def convert_units(obj, to):
     elif isinstance(obj, xr.DataArray):
         name = obj.name
 
-        new_units = (
-            to.get(name, None) or to.get("data", None) or to.get(None, None) or None
-        )
+        new_units = to.get(name) or to.get("data") or to.get(None) or None
         data = convert_units(obj.variable, {None: new_units})
 
         coords = {
@@ -246,12 +256,12 @@ def convert_units(obj, to):
             if name != obj.name
         }
 
-        new_obj = xr.DataArray(
+        new_obj = xr.DataArray(  # type: ignore[assignment]
             name=name, data=data, coords=coords, attrs=obj.attrs, dims=obj.dims
         )
     elif isinstance(obj, xr.Variable):
         new_data = convert_units(obj.data, to)
-        new_obj = obj.copy(data=new_data)
+        new_obj = obj.copy(data=new_data)  # type: ignore[assignment]
     elif isinstance(obj, unit_registry.Quantity):
         units = to.get(None)
         new_obj = obj.to(units) if units is not None else obj
@@ -320,12 +330,14 @@ class method:
             if self.fallback is not None:
                 func = partial(self.fallback, obj)
             else:
-                func = getattr(obj, self.name, None)
+                func_attr = getattr(obj, self.name, None)
 
-                if func is None or not callable(func):
+                if func_attr is None or not callable(func_attr):
                     # fall back to module level numpy functions
                     numpy_func = getattr(np, self.name)
                     func = partial(numpy_func, obj)
+                else:
+                    func = func_attr
         else:
             func = getattr(obj, self.name)
 
@@ -386,7 +398,7 @@ def test_apply_ufunc_dataarray(variant,
         "dims": (1, unit_registry.m, 1),
         "coords": (1, 1, unit_registry.m),
     }
-    data_unit, dim_unit, coord_unit = variants.get(variant)
+    data_unit, dim_unit, coord_unit = variants[variant]
     func = functools.partial(
         xr.apply_ufunc, np.mean, input_core_dims=[["x"]], kwargs={"axis": -1}
     )
@@ -419,8 +431,7 @@ def test_apply_ufunc_dataset(variant, dt
         "dims": (1, unit_registry.m, 1),
         "coords": (1, 1, unit_registry.s),
     }
-    data_unit, dim_unit, coord_unit = variants.get(variant)
-
+    data_unit, dim_unit, coord_unit = variants[variant]
     func = functools.partial(
         xr.apply_ufunc, np.mean, input_core_dims=[["x"]], kwargs={"axis": -1}
     )
@@ -493,7 +504,7 @@ def test_align_dataarray(value, variant,
         (data_unit1, data_unit2),
         (dim_unit1, dim_unit2),
         (coord_unit1, coord_unit2),
-    ) = variants.get(variant)
+    ) = variants[variant]
 
     array1 = np.linspace(0, 10, 2 * 5).reshape(2, 5).astype(dtype) * data_unit1
     array2 = np.linspace(0, 8, 2 * 5).reshape(2, 5).astype(dtype) * data_unit2
@@ -597,7 +608,7 @@ def test_align_dataset(value, unit, vari
         (data_unit1, data_unit2),
         (dim_unit1, dim_unit2),
         (coord_unit1, coord_unit2),
-    ) = variants.get(variant)
+    ) = variants[variant]
 
     array1 = np.linspace(0, 10, 2 * 5).reshape(2, 5).astype(dtype) * data_unit1
     array2 = np.linspace(0, 10, 2 * 5).reshape(2, 5).astype(dtype) * data_unit2
@@ -734,6 +745,9 @@ def test_broadcast_dataset(dtype):
         "coords",
     ),
 )
+@pytest.mark.filterwarnings(
+    "ignore:.*the default value for coords will change:FutureWarning"
+)
 def test_combine_by_coords(variant, unit, error, dtype):
     original_unit = unit_registry.m
 
@@ -746,7 +760,7 @@ def test_combine_by_coords(variant, unit
         (data_unit1, data_unit2),
         (dim_unit1, dim_unit2),
         (coord_unit1, coord_unit2),
-    ) = variants.get(variant)
+    ) = variants[variant]
 
     array1 = np.zeros(shape=(2, 3), dtype=dtype) * data_unit1
     array2 = np.zeros(shape=(2, 3), dtype=dtype) * data_unit1
@@ -811,6 +825,12 @@ def test_combine_by_coords(variant, unit
         "coords",
     ),
 )
+@pytest.mark.filterwarnings(
+    "ignore:.*the default value for join will change:FutureWarning"
+)
+@pytest.mark.filterwarnings(
+    "ignore:.*the default value for compat will change:FutureWarning"
+)
 def test_combine_nested(variant, unit, error, dtype):
     original_unit = unit_registry.m
 
@@ -823,7 +843,7 @@ def test_combine_nested(variant, unit, e
         (data_unit1, data_unit2),
         (dim_unit1, dim_unit2),
         (coord_unit1, coord_unit2),
-    ) = variants.get(variant)
+    ) = variants[variant]
 
     array1 = np.zeros(shape=(2, 3), dtype=dtype) * data_unit1
     array2 = np.zeros(shape=(2, 3), dtype=dtype) * data_unit1
@@ -929,7 +949,7 @@ def test_concat_dataarray(variant, unit,
         (data_unit1, data_unit2),
         (dim_unit1, dim_unit2),
         (coord_unit1, coord_unit2),
-    ) = variants.get(variant)
+    ) = variants[variant]
 
     array1 = np.linspace(0, 5, 10).astype(dtype) * data_unit1
     array2 = np.linspace(-5, 0, 5).astype(dtype) * data_unit2
@@ -997,7 +1017,7 @@ def test_concat_dataset(variant, unit, e
         (data_unit1, data_unit2),
         (dim_unit1, dim_unit2),
         (coord_unit1, coord_unit2),
-    ) = variants.get(variant)
+    ) = variants[variant]
 
     array1 = np.linspace(0, 5, 10).astype(dtype) * data_unit1
     array2 = np.linspace(-5, 0, 5).astype(dtype) * data_unit2
@@ -1051,6 +1071,12 @@ def test_concat_dataset(variant, unit, e
         "coords",
     ),
 )
+@pytest.mark.filterwarnings(
+    "ignore:.*the default value for join will change:FutureWarning"
+)
+@pytest.mark.filterwarnings(
+    "ignore:.*the default value for compat will change:FutureWarning"
+)
 def test_merge_dataarray(variant, unit, error, dtype):
     original_unit = unit_registry.m
 
@@ -1063,7 +1089,7 @@ def test_merge_dataarray(variant, unit,
         (data_unit1, data_unit2),
         (dim_unit1, dim_unit2),
         (coord_unit1, coord_unit2),
-    ) = variants.get(variant)
+    ) = variants[variant]
 
     array1 = np.linspace(0, 1, 2 * 3).reshape(2, 3).astype(dtype) * data_unit1
     x1 = np.arange(2) * dim_unit1
@@ -1155,6 +1181,12 @@ def test_merge_dataarray(variant, unit,
         "coords",
     ),
 )
+@pytest.mark.filterwarnings(
+    "ignore:.*the default value for join will change:FutureWarning"
+)
+@pytest.mark.filterwarnings(
+    "ignore:.*the default value for compat will change:FutureWarning"
+)
 def test_merge_dataset(variant, unit, error, dtype):
     original_unit = unit_registry.m
 
@@ -1167,7 +1199,7 @@ def test_merge_dataset(variant, unit, er
         (data_unit1, data_unit2),
         (dim_unit1, dim_unit2),
         (coord_unit1, coord_unit2),
-    ) = variants.get(variant)
+    ) = variants[variant]
 
     array1 = np.zeros(shape=(2, 3), dtype=dtype) * data_unit1
     array2 = np.zeros(shape=(2, 3), dtype=dtype) * data_unit1
@@ -1241,8 +1273,7 @@ def test_replication_dataarray(func, var
         "dims": (1, unit, 1),
         "coords": (1, 1, unit),
     }
-    data_unit, dim_unit, coord_unit = variants.get(variant)
-
+    data_unit, dim_unit, coord_unit = variants[variant]
     array = np.linspace(0, 10, 20).astype(dtype) * data_unit
     x = np.arange(20) * dim_unit
     u = np.linspace(0, 1, 20) * coord_unit
@@ -1277,7 +1308,7 @@ def test_replication_dataset(func, varia
         "dims": ((1, 1), unit, 1),
         "coords": ((1, 1), 1, unit),
     }
-    (data_unit1, data_unit2), dim_unit, coord_unit = variants.get(variant)
+    (data_unit1, data_unit2), dim_unit, coord_unit = variants[variant]
 
     array1 = np.linspace(0, 10, 20).astype(dtype) * data_unit1
     array2 = np.linspace(5, 10, 10).astype(dtype) * data_unit2
@@ -1326,8 +1357,7 @@ def test_replication_full_like_dataarray
         "dims": (1, unit, 1),
         "coords": (1, 1, unit),
     }
-    data_unit, dim_unit, coord_unit = variants.get(variant)
-
+    data_unit, dim_unit, coord_unit = variants[variant]
     array = np.linspace(0, 5, 10) * data_unit
     x = np.arange(10) * dim_unit
     u = np.linspace(0, 1, 10) * coord_unit
@@ -1367,7 +1397,7 @@ def test_replication_full_like_dataset(v
         "dims": ((1, 1), unit, 1),
         "coords": ((1, 1), 1, unit),
     }
-    (data_unit1, data_unit2), dim_unit, coord_unit = variants.get(variant)
+    (data_unit1, data_unit2), dim_unit, coord_unit = variants[variant]
 
     array1 = np.linspace(0, 10, 20).astype(dtype) * data_unit1
     array2 = np.linspace(5, 10, 10).astype(dtype) * data_unit2
@@ -1385,7 +1415,7 @@ def test_replication_full_like_dataset(v
 
     units = {
         **extract_units(ds),
-        **{name: unit_registry.degK for name in ds.data_vars},
+        **dict.fromkeys(ds.data_vars, unit_registry.degK),
     }
     expected = attach_units(
         xr.full_like(strip_units(ds), fill_value=strip_units(fill_value)), units
@@ -1871,7 +1901,7 @@ class TestVariable:
     )
     def test_isel(self, variable, indexers, dask, dtype):
         if dask:
-            variable = variable.chunk({dim: 2 for dim in variable.dims})
+            variable = variable.chunk(dict.fromkeys(variable.dims, 2))
         quantified = xr.Variable(
             variable.dims, variable.data.astype(dtype) * unit_registry.s
         )
@@ -2065,7 +2095,7 @@ class TestVariable:
 
         if error is not None:
             with pytest.raises(error):
-                variable.searchsorted(value)
+                variable.searchsorted(value)  # type: ignore[attr-defined]
 
             return
 
@@ -2073,7 +2103,7 @@ class TestVariable:
             strip_units(convert_units(value, {None: base_unit}))
         )
 
-        actual = variable.searchsorted(value)
+        actual = variable.searchsorted(value)  # type: ignore[attr-defined]
 
         assert_units_equal(expected, actual)
         np.testing.assert_allclose(expected, actual)
@@ -2299,7 +2329,7 @@ class TestDataArray:
             "without_coords": {},
         }
 
-        kwargs = {"data": array, "dims": "x", "coords": variants.get(variant)}
+        kwargs = {"data": array, "dims": "x", "coords": variants[variant]}
         data_array = xr.DataArray(**kwargs)
 
         assert isinstance(data_array.data, Quantity)
@@ -2335,7 +2365,7 @@ class TestDataArray:
             "without_coords": {},
         }
 
-        kwargs = {"data": array, "dims": "x", "coords": variants.get(variant)}
+        kwargs = {"data": array, "dims": "x", "coords": variants[variant]}
         data_array = xr.DataArray(**kwargs)
 
         # FIXME: this just checks that the repr does not raise
@@ -2651,7 +2681,7 @@ class TestDataArray:
         data_array = xr.DataArray(data=array)
 
         scalar_types = (int, float)
-        args = list(value * unit for value in func.args)
+        args = [value * unit for value in func.args]
         kwargs = {
             key: (value * unit if isinstance(value, scalar_types) else value)
             for key, value in func.kwargs.items()
@@ -2709,7 +2739,7 @@ class TestDataArray:
         data_array = xr.DataArray(data=array)
 
         scalar_types = (int, float)
-        args = list(value * unit for value in func.args)
+        args = [value * unit for value in func.args]
         kwargs = {
             key: (value * unit if isinstance(value, scalar_types) else value)
             for key, value in func.kwargs.items()
@@ -2763,7 +2793,7 @@ class TestDataArray:
     @pytest.mark.parametrize("func", (method("ffill"), method("bfill")), ids=repr)
     def test_missing_value_filling(self, func, dtype):
         array = (
-            np.array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1]).astype(dtype)
+            create_nan_array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1], dtype)
             * unit_registry.degK
         )
         x = np.arange(len(array))
@@ -2800,7 +2830,7 @@ class TestDataArray:
     def test_fillna(self, fill_value, unit, error, dtype):
         original_unit = unit_registry.m
         array = (
-            np.array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1]).astype(dtype)
+            create_nan_array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1], dtype)
             * original_unit
         )
         data_array = xr.DataArray(data=array)
@@ -2828,7 +2858,7 @@ class TestDataArray:
 
     def test_dropna(self, dtype):
         array = (
-            np.array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1]).astype(dtype)
+            create_nan_array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1], dtype)
             * unit_registry.m
         )
         x = np.arange(len(array))
@@ -2853,12 +2883,12 @@ class TestDataArray:
     )
     def test_isin(self, unit, dtype):
         array = (
-            np.array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1]).astype(dtype)
+            create_nan_array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1], dtype)
             * unit_registry.m
         )
         data_array = xr.DataArray(data=array, dims="x")
 
-        raw_values = np.array([1.4, np.nan, 2.3]).astype(dtype)
+        raw_values = create_nan_array([1.4, np.nan, 2.3], dtype)
         values = raw_values * unit
 
         units = {None: unit_registry.m if array.check(unit) else None}
@@ -2899,7 +2929,7 @@ class TestDataArray:
             "replacing_array": {"cond": condition, "other": other},
             "dropping": {"cond": condition, "drop": True},
         }
-        kwargs = variant_kwargs.get(variant)
+        kwargs = variant_kwargs[variant]
         kwargs_without_units = {
             key: strip_units(
                 convert_units(
@@ -3041,7 +3071,7 @@ class TestDataArray:
             "dims": (1, unit, 1),
             "coords": (1, 1, unit),
         }
-        data_unit, dim_unit, coord_unit = variations.get(variation)
+        data_unit, dim_unit, coord_unit = variations[variation]
 
         data_array = xr.DataArray(data=array, coords={"x": x, "y": ("x", y)}, dims="x")
 
@@ -3098,7 +3128,7 @@ class TestDataArray:
             (data_unit1, data_unit2),
             (dim_unit1, dim_unit2),
             (coord_unit1, coord_unit2),
-        ) = variants.get(variant)
+        ) = variants[variant]
 
         array1 = np.linspace(1, 2, 2 * 1).reshape(2, 1).astype(dtype) * data_unit1
         array2 = np.linspace(0, 1, 2 * 3).reshape(2, 3).astype(dtype) * data_unit2
@@ -3210,8 +3240,7 @@ class TestDataArray:
             "dims": (1, unit, 1),
             "coords": (1, 1, unit),
         }
-        data_unit, dim_unit, coord_unit = variants.get(variant)
-
+        data_unit, dim_unit, coord_unit = variants[variant]
         quantity = np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype) * data_unit
         x = np.arange(quantity.shape[0]) * dim_unit
         y = np.arange(quantity.shape[1]) * dim_unit
@@ -3479,7 +3508,7 @@ class TestDataArray:
             "data": (unit_registry.m, 1),
             "coords": (1, unit_registry.m),
         }
-        data_unit, coord_unit = variants.get(variant)
+        data_unit, coord_unit = variants[variant]
 
         array = np.linspace(1, 2, 10).astype(dtype) * data_unit
         y = np.arange(10) * coord_unit
@@ -3554,7 +3583,7 @@ class TestDataArray:
             "data": (unit_registry.m, 1),
             "coords": (1, unit_registry.m),
         }
-        data_unit, coord_unit = variants.get(variant)
+        data_unit, coord_unit = variants[variant]
 
         array = np.linspace(1, 2, 10).astype(dtype) * data_unit
         coord = np.arange(10) * coord_unit
@@ -3734,7 +3763,7 @@ class TestDataArray:
             "dims": ("x", unit, 1),
             "coords": ("u", 1, unit),
         }
-        coord, dim_unit, coord_unit = variants.get(variant)
+        coord, dim_unit, coord_unit = variants[variant]
 
         array = np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype) * data_unit
 
@@ -3796,8 +3825,7 @@ class TestDataArray:
             "dims": (1, unit, 1),
             "coords": (1, 1, unit),
         }
-        data_unit, dim_unit, coord_unit = variants.get(variant)
-
+        data_unit, dim_unit, coord_unit = variants[variant]
         array = np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype) * data_unit
 
         x = np.arange(array.shape[0]) * dim_unit
@@ -3859,8 +3887,7 @@ class TestDataArray:
             "dims": (1, unit, 1),
             "coords": (1, 1, unit),
         }
-        data_unit, dim_unit, coord_unit = variants.get(variant)
-
+        data_unit, dim_unit, coord_unit = variants[variant]
         array = np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype) * data_unit
 
         x = np.array([0, 0, 1, 2, 2]) * dim_unit
@@ -3923,7 +3950,7 @@ class TestDataArray:
             "dims": (1, unit, 1),
             "coords": (1, 1, unit),
         }
-        data_unit, dim_unit, coord_unit = variants.get(variant)
+        data_unit, dim_unit, coord_unit = variants[variant]
         array = np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype) * data_unit
 
         x = np.arange(array.shape[0]) * dim_unit
@@ -4007,7 +4034,7 @@ class TestDataset:
                 {"x": values_b, "y": ("x", coord_b)},
             ),
         }
-        coords_a, coords_b = variants.get(shared)
+        coords_a, coords_b = variants[shared]
 
         dims_a, dims_b = ("x", "y") if shared == "nothing" else ("x", "x")
 
@@ -4065,7 +4092,7 @@ class TestDataset:
 
         ds = xr.Dataset(
             data_vars={"a": ("x", array1), "b": ("x", array2)},
-            coords=variants.get(variant),
+            coords=variants[variant],
         )
 
         # FIXME: this just checks that the repr does not raise
@@ -4249,11 +4276,11 @@ class TestDataset:
     @pytest.mark.parametrize("func", (method("ffill"), method("bfill")), ids=repr)
     def test_missing_value_filling(self, func, dtype):
         array1 = (
-            np.array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1]).astype(dtype)
+            create_nan_array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1], dtype)
             * unit_registry.degK
         )
         array2 = (
-            np.array([4.3, 9.8, 7.5, np.nan, 8.2, np.nan]).astype(dtype)
+            create_nan_array([4.3, 9.8, 7.5, np.nan, 8.2, np.nan], dtype)
             * unit_registry.Pa
         )
 
@@ -4292,11 +4319,11 @@ class TestDataset:
     )
     def test_fillna(self, fill_value, unit, error, dtype):
         array1 = (
-            np.array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1]).astype(dtype)
+            create_nan_array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1], dtype)
             * unit_registry.m
         )
         array2 = (
-            np.array([4.3, 9.8, 7.5, np.nan, 8.2, np.nan]).astype(dtype)
+            create_nan_array([4.3, 9.8, 7.5, np.nan, 8.2, np.nan], dtype)
             * unit_registry.m
         )
         ds = xr.Dataset({"a": ("x", array1), "b": ("x", array2)})
@@ -4322,11 +4349,11 @@ class TestDataset:
 
     def test_dropna(self, dtype):
         array1 = (
-            np.array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1]).astype(dtype)
+            create_nan_array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1], dtype)
             * unit_registry.degK
         )
         array2 = (
-            np.array([4.3, 9.8, 7.5, np.nan, 8.2, np.nan]).astype(dtype)
+            create_nan_array([4.3, 9.8, 7.5, np.nan, 8.2, np.nan], dtype)
             * unit_registry.Pa
         )
         ds = xr.Dataset({"a": ("x", array1), "b": ("x", array2)})
@@ -4350,16 +4377,16 @@ class TestDataset:
     )
     def test_isin(self, unit, dtype):
         array1 = (
-            np.array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1]).astype(dtype)
+            create_nan_array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1], dtype)
             * unit_registry.m
         )
         array2 = (
-            np.array([4.3, 9.8, 7.5, np.nan, 8.2, np.nan]).astype(dtype)
+            create_nan_array([4.3, 9.8, 7.5, np.nan, 8.2, np.nan], dtype)
             * unit_registry.m
         )
         ds = xr.Dataset({"a": ("x", array1), "b": ("x", array2)})
 
-        raw_values = np.array([1.4, np.nan, 2.3]).astype(dtype)
+        raw_values = create_nan_array([1.4, np.nan, 2.3], dtype)
         values = raw_values * unit
 
         converted_values = (
@@ -4411,7 +4438,7 @@ class TestDataset:
             "replacing_array": {"cond": condition, "other": other},
             "dropping": {"cond": condition, "drop": True},
         }
-        kwargs = variant_kwargs.get(variant)
+        kwargs = variant_kwargs[variant]
         if variant not in ("masking", "dropping") and error is not None:
             with pytest.raises(error):
                 ds.where(**kwargs)
@@ -4435,11 +4462,11 @@ class TestDataset:
     @pytest.mark.xfail(reason="interpolate_na uses numpy.vectorize")
     def test_interpolate_na(self, dtype):
         array1 = (
-            np.array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1]).astype(dtype)
+            create_nan_array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1], dtype)
             * unit_registry.degK
         )
         array2 = (
-            np.array([4.3, 9.8, 7.5, np.nan, 8.2, np.nan]).astype(dtype)
+            create_nan_array([4.3, 9.8, 7.5, np.nan, 8.2, np.nan], dtype)
             * unit_registry.Pa
         )
         ds = xr.Dataset({"a": ("x", array1), "b": ("x", array2)})
@@ -4481,13 +4508,13 @@ class TestDataset:
             "data": (unit_registry.m, unit, 1, 1),
             "dims": (1, 1, unit_registry.m, unit),
         }
-        data_unit, other_data_unit, dims_unit, other_dims_unit = variants.get(variant)
+        data_unit, other_data_unit, dims_unit, other_dims_unit = variants[variant]
 
         array1 = (
-            np.array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1]).astype(dtype) * data_unit
+            create_nan_array([1.4, np.nan, 2.3, np.nan, np.nan, 9.1], dtype) * data_unit
         )
         array2 = (
-            np.array([4.3, 9.8, 7.5, np.nan, 8.2, np.nan]).astype(dtype) * data_unit
+            create_nan_array([4.3, 9.8, 7.5, np.nan, 8.2, np.nan], dtype) * data_unit
         )
         x = np.arange(len(array1)) * dims_unit
         ds = xr.Dataset(
@@ -4561,8 +4588,7 @@ class TestDataset:
             "dims": (1, unit_registry.m, 1),
             "coords": (1, 1, unit_registry.m),
         }
-        data_unit, dim_unit, coord_unit = variants.get(variant)
-
+        data_unit, dim_unit, coord_unit = variants[variant]
         a = array1 * data_unit
         b = array2 * data_unit
         x = coord * dim_unit
@@ -4579,7 +4605,7 @@ class TestDataset:
             "dims": (1, unit, 1),
             "coords": (1, 1, unit),
         }
-        other_data_unit, other_dim_unit, other_coord_unit = other_variants.get(variant)
+        other_data_unit, other_dim_unit, other_coord_unit = other_variants[variant]
 
         other_units = {
             "a": other_data_unit,
@@ -4637,7 +4663,7 @@ class TestDataset:
             "data": ((unit_registry.m, unit), (1, 1)),
             "dims": ((1, 1), (unit_registry.m, unit)),
         }
-        (data_unit1, data_unit2), (dim_unit1, dim_unit2) = variants.get(variant)
+        (data_unit1, data_unit2), (dim_unit1, dim_unit2) = variants[variant]
 
         array1 = np.linspace(1, 2, 2 * 1).reshape(2, 1).astype(dtype) * data_unit1
         array2 = np.linspace(0, 1, 2 * 3).reshape(2, 3).astype(dtype) * data_unit2
@@ -4729,7 +4755,7 @@ class TestDataset:
             "data": (unit_registry.m, 1),
             "dims": (1, unit_registry.m),
         }
-        data_unit, dim_unit = variants.get(variant)
+        data_unit, dim_unit = variants[variant]
 
         array1 = np.linspace(0, 10, 5 * 10).reshape(5, 10).astype(dtype) * data_unit
         array2 = (
@@ -5034,7 +5060,7 @@ class TestDataset:
             "dims": ((1, 1), unit_registry.m, 1),
             "coords": ((1, 1), 1, unit_registry.m),
         }
-        (unit_a, unit_b), dim_unit, coord_unit = variants.get(variant)
+        (unit_a, unit_b), dim_unit, coord_unit = variants[variant]
 
         array1 = np.linspace(1, 2, 10 * 5).reshape(10, 5) * unit_a
         array2 = np.linspace(1, 2, 10 * 8).reshape(10, 8) * unit_b
@@ -5116,7 +5142,7 @@ class TestDataset:
             "data": (unit_registry.m, 1),
             "coords": (1, unit_registry.m),
         }
-        data_unit, coord_unit = variants.get(variant)
+        data_unit, coord_unit = variants[variant]
 
         array1 = np.linspace(-1, 0, 10).astype(dtype) * data_unit
         array2 = np.linspace(0, 1, 10).astype(dtype) * data_unit
@@ -5189,7 +5215,7 @@ class TestDataset:
             "data": (unit_registry.m, 1),
             "coords": (1, unit_registry.m),
         }
-        data_unit, coord_unit = variants.get(variant)
+        data_unit, coord_unit = variants[variant]
 
         array1 = np.linspace(-1, 0, 10).astype(dtype) * data_unit
         array2 = np.linspace(0, 1, 10).astype(dtype) * data_unit
@@ -5281,7 +5307,7 @@ class TestDataset:
             "dims": ((1, 1), unit_registry.m, 1),
             "coords": ((1, 1), 1, unit_registry.m),
         }
-        (unit1, unit2), dim_unit, coord_unit = variants.get(variant)
+        (unit1, unit2), dim_unit, coord_unit = variants[variant]
 
         array1 = np.linspace(-5, 5, 4 * 5).reshape(4, 5).astype(dtype) * unit1
         array2 = np.linspace(10, 20, 4 * 3).reshape(4, 3).astype(dtype) * unit2
@@ -5340,7 +5366,7 @@ class TestDataset:
             "dims": ((1, 1), unit_registry.m, 1),
             "coords": ((1, 1), 1, unit_registry.m),
         }
-        (unit1, unit2), dim_unit, coord_unit = variants.get(variant)
+        (unit1, unit2), dim_unit, coord_unit = variants[variant]
 
         array1 = np.linspace(-5, 5, 4 * 5).reshape(4, 5).astype(dtype) * unit1
         array2 = np.linspace(10, 20, 4 * 3).reshape(4, 3).astype(dtype) * unit2
@@ -5358,7 +5384,7 @@ class TestDataset:
         # Doesn't work with flox because pint doesn't implement
         # ufunc.reduceat or np.bincount
         #  kwargs = {"engine": "numpy"} if "groupby" in func.name else {}
-        kwargs = {}
+        kwargs: dict[str, Any] = {}
         expected = attach_units(func(strip_units(ds)).mean(*args, **kwargs), units)
         actual = func(ds).mean(*args, **kwargs)
 
@@ -5382,7 +5408,7 @@ class TestDataset:
             "dims": ((1, 1), unit_registry.m, 1),
             "coords": ((1, 1), 1, unit_registry.m),
         }
-        (unit1, unit2), dim_unit, coord_unit = variants.get(variant)
+        (unit1, unit2), dim_unit, coord_unit = variants[variant]
 
         array1 = np.linspace(-5, 5, 10 * 5).reshape(10, 5).astype(dtype) * unit1
         array2 = np.linspace(10, 20, 10 * 8).reshape(10, 8).astype(dtype) * unit2
@@ -5435,7 +5461,7 @@ class TestDataset:
             "dims": ((1, 1), unit_registry.m, 1),
             "coords": ((1, 1), 1, unit_registry.m),
         }
-        (unit1, unit2), dim_unit, coord_unit = variants.get(variant)
+        (unit1, unit2), dim_unit, coord_unit = variants[variant]
 
         array1 = np.linspace(-5, 5, 5 * 4).reshape(5, 4).astype(dtype) * unit1
         array2 = np.linspace(10, 20, 5 * 4 * 3).reshape(5, 4, 3).astype(dtype) * unit2
@@ -5509,7 +5535,7 @@ class TestDataset:
             "dims": ((1, 1, 1), unit_registry.m, 1),
             "coords": ((1, 1, 1), 1, unit_registry.m),
         }
-        (unit1, unit2, unit3), dim_unit, coord_unit = variants.get(variant)
+        (unit1, unit2, unit3), dim_unit, coord_unit = variants[variant]
 
         array1 = np.linspace(-5, 5, 5 * 4).reshape(5, 4).astype(dtype) * unit1
         array2 = np.linspace(10, 20, 5 * 4 * 3).reshape(5, 4, 3).astype(dtype) * unit2
@@ -5573,6 +5599,12 @@ class TestDataset:
             "coords",
         ),
     )
+    @pytest.mark.filterwarnings(
+        "ignore:.*the default value for join will change:FutureWarning"
+    )
+    @pytest.mark.filterwarnings(
+        "ignore:.*the default value for compat will change:FutureWarning"
+    )
     def test_merge(self, variant, unit, error, dtype):
         left_variants = {
             "data": (unit_registry.m, 1, 1),
@@ -5580,14 +5612,14 @@ class TestDataset:
             "coords": (1, 1, unit_registry.m),
         }
 
-        left_data_unit, left_dim_unit, left_coord_unit = left_variants.get(variant)
+        left_data_unit, left_dim_unit, left_coord_unit = left_variants[variant]
 
         right_variants = {
             "data": (unit, 1, 1),
             "dims": (1, unit, 1),
             "coords": (1, 1, unit),
         }
-        right_data_unit, right_dim_unit, right_coord_unit = right_variants.get(variant)
+        right_data_unit, right_dim_unit, right_coord_unit = right_variants[variant]
 
         left_array = np.arange(10).astype(dtype) * left_data_unit
         right_array = np.arange(-5, 5).astype(dtype) * right_data_unit
@@ -5638,7 +5670,7 @@ class TestPintWrappingDask:
 
         assert_units_equal(expected, actual)
         # Don't use isinstance b/c we don't want to allow subclasses through
-        assert type(expected.data) == type(actual.data)  # noqa: E721
+        assert type(expected.data) is type(actual.data)
 
 
 @requires_matplotlib
@@ -5687,7 +5719,7 @@ class TestPlots(PlotTestCase):
             ),
             dims=("a", "b"),
         )
-        arr.sel(a=5).plot(marker="o")
+        arr.sel(a=5).plot(marker="o")  # type: ignore[call-arg]
 
         assert plt.gca().get_title() == "a = 5 [meter]"
 
@@ -5712,14 +5744,14 @@ class TestPlots(PlotTestCase):
             ),
             dims=("x", "y"),
         )
-        arr.isel(x=0).plot(marker="o")
+        arr.isel(x=0).plot(marker="o")  # type: ignore[call-arg]
         assert plt.gca().get_title() == "a = 5 [meter]"
 
     def test_units_in_2d_plot_colorbar_label(self):
         arr = np.ones((2, 3)) * unit_registry.Pa
         da = xr.DataArray(data=arr, dims=["x", "y"], name="pressure")
 
-        fig, (ax, cax) = plt.subplots(1, 2)
+        _fig, (ax, cax) = plt.subplots(1, 2)
         ax = da.plot.contourf(ax=ax, cbar_ax=cax, add_colorbar=True)
 
         assert cax.get_ylabel() == "pressure [pascal]"
@@ -5728,7 +5760,7 @@ class TestPlots(PlotTestCase):
         arr = np.ones((2, 3)) * unit_registry.Pa
         da = xr.DataArray(data=arr, dims=["x", "y"], name="pressure")
 
-        fig, (ax, cax) = plt.subplots(1, 2)
+        _fig, (_ax, _cax) = plt.subplots(1, 2)
         fgrid = da.plot.line(x="x", col="y")
 
         assert fgrid.axs[0, 0].get_ylabel() == "pressure [pascal]"
@@ -5743,7 +5775,7 @@ class TestPlots(PlotTestCase):
         arr = np.ones((2, 3, 4)) * unit_registry.Pa
         da = xr.DataArray(data=arr, dims=["x", "y", "z"], name="pressure")
 
-        fig, (ax1, ax2, ax3, cax) = plt.subplots(1, 4)
+        _fig, (_ax1, _ax2, _ax3, _cax) = plt.subplots(1, 4)
         fgrid = da.plot.contourf(x="x", y="y", col="z")
 
-        assert fgrid.cbar.ax.get_ylabel() == "pressure [pascal]"
+        assert fgrid.cbar.ax.get_ylabel() == "pressure [pascal]"  # type: ignore[union-attr]
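The create_nan_array helper added near the top of this file wraps a general NumPy pattern; a standalone sketch follows (the warning text matches what current NumPy versions emit when NaN is cast to an integer dtype).

import warnings

import numpy as np

def create_nan_array(values, dtype):
    """Cast values to dtype, silencing the expected NaN -> int cast warning."""
    with warnings.catch_warnings():
        if np.issubdtype(dtype, np.integer):
            warnings.filterwarnings("ignore", "invalid value encountered in cast")
        return np.array(values).astype(dtype)

print(create_nan_array([1.4, np.nan, 2.3], float))  # [1.4 nan 2.3]
print(create_nan_array([1.4, np.nan, 2.3], int))    # NaN becomes an arbitrary integer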
diff -pruN 2025.03.1-8/xarray/tests/test_utils.py 2025.10.1-1/xarray/tests/test_utils.py
--- 2025.03.1-8/xarray/tests/test_utils.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_utils.py	2025-10-10 10:38:05.000000000 +0000
@@ -11,6 +11,7 @@ from xarray.core import duck_array_ops,
 from xarray.core.utils import (
     attempt_import,
     either_dict_or_kwargs,
+    flat_items,
     infix_dims,
     iterate_nested,
 )
@@ -23,7 +24,7 @@ class TestAlias:
             pass
 
         old_method = utils.alias(new_method, "old_method")
-        assert "deprecated" in old_method.__doc__
+        assert "deprecated" in old_method.__doc__  # type: ignore[operator]
         with pytest.warns(Warning, match="deprecated"):
             old_method()
 
@@ -102,10 +103,10 @@ class TestDictionaries:
             utils.compat_dict_union(self.x, self.z)
 
     def test_dict_equiv(self):
-        x = {}
+        x: dict = {}
         x["a"] = 3
         x["b"] = np.array([1, 2, 3])
-        y = {}
+        y: dict = {}
         y["b"] = np.array([1.0, 2.0, 3.0])
         y["a"] = 3
         assert utils.dict_equiv(x, y)  # two nparrays are equal
@@ -129,11 +130,11 @@ class TestDictionaries:
     def test_frozen(self):
         x = utils.Frozen(self.x)
         with pytest.raises(TypeError):
-            x["foo"] = "bar"
+            x["foo"] = "bar"  # type: ignore[index]
         with pytest.raises(TypeError):
-            del x["a"]
+            del x["a"]  # type: ignore[attr-defined]
         with pytest.raises(AttributeError):
-            x.update(self.y)
+            x.update(self.y)  # type: ignore[attr-defined]
         assert x.mapping == self.x
         assert repr(x) in (
             "Frozen({'a': 'A', 'b': 'B'})",
@@ -151,6 +152,13 @@ class TestDictionaries:
         assert dict(x) == {"a": 1}
 
 
+def test_flat_items() -> None:
+    mapping = {"x": {"y": 1, "z": 2}, "x/y": 3}
+    actual = list(flat_items(mapping))
+    expected = [("x/y", 1), ("x/z", 2), ("x/y", 3)]
+    assert actual == expected
+
+
 def test_repr_object():
     obj = utils.ReprObject("foo")
     assert repr(obj) == "foo"
@@ -231,11 +239,11 @@ def test_hidden_key_dict():
 
 
 def test_either_dict_or_kwargs():
-    result = either_dict_or_kwargs(dict(a=1), None, "foo")
+    result = either_dict_or_kwargs(dict(a=1), {}, "foo")
     expected = dict(a=1)
     assert result == expected
 
-    result = either_dict_or_kwargs(None, dict(a=1), "foo")
+    result = either_dict_or_kwargs({}, dict(a=1), "foo")
     expected = dict(a=1)
     assert result == expected
 
@@ -300,7 +308,7 @@ def test_parse_dims_set() -> None:
 @pytest.mark.parametrize(
     "dim", [pytest.param(None, id="None"), pytest.param(..., id="ellipsis")]
 )
-def test_parse_dims_replace_none(dim: None | EllipsisType) -> None:
+def test_parse_dims_replace_none(dim: EllipsisType | None) -> None:
     all_dims = ("a", "b", 1, ("b", "c"))  # selection of different Hashables
     actual = utils.parse_dims_as_tuple(dim, all_dims, replace_none=True)
     assert actual == all_dims
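A short sketch of the behaviour exercised by test_flat_items above (flat_items is an internal utility under xarray.core.utils, so the import path may change): nested mappings are flattened into slash-joined keys, without deduplication.

from xarray.core.utils import flat_items

mapping = {"x": {"y": 1, "z": 2}, "x/y": 3}
print(list(flat_items(mapping)))  # [('x/y', 1), ('x/z', 2), ('x/y', 3)]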
diff -pruN 2025.03.1-8/xarray/tests/test_variable.py 2025.10.1-1/xarray/tests/test_variable.py
--- 2025.03.1-8/xarray/tests/test_variable.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_variable.py	2025-10-10 10:38:05.000000000 +0000
@@ -5,7 +5,7 @@ from abc import ABC
 from copy import copy, deepcopy
 from datetime import datetime, timedelta
 from textwrap import dedent
-from typing import Generic
+from typing import Any, Generic
 
 import numpy as np
 import pandas as pd
@@ -15,6 +15,7 @@ import pytz
 from xarray import DataArray, Dataset, IndexVariable, Variable, set_options
 from xarray.core import dtypes, duck_array_ops, indexing
 from xarray.core.common import full_like, ones_like, zeros_like
+from xarray.core.extension_array import PandasExtensionArray
 from xarray.core.indexing import (
     BasicIndexer,
     CopyOnWriteArray,
@@ -333,10 +334,10 @@ class VariableSubclassobjects(NamedArray
         v = self.cls(["x"], pd.period_range(start="2000", periods=20, freq="D"))
         v = v.load()  # for dask-based Variable
         assert v[0] == pd.Period("2000", freq="D")
-        assert "Period('2000-01-01', 'D')" in repr(v)
+        assert "PeriodArray" in repr(v)
 
     @pytest.mark.parametrize("dtype", [float, int])
-    def test_1d_math(self, dtype: np.typing.DTypeLike) -> None:
+    def test_1d_math(self, dtype: np.typing.DTypeLike | None) -> None:
         x = np.arange(5, dtype=dtype)
         y = np.ones(5, dtype=dtype)
 
@@ -656,7 +657,7 @@ class VariableSubclassobjects(NamedArray
         data = pd.Categorical(np.arange(10, dtype="int64"))
         v = self.cls("x", data)
         print(v)  # should not error
-        assert v.dtype == "int64"
+        assert v.dtype == data.dtype
 
     def test_pandas_datetime64_with_tz(self):
         data = pd.date_range(
@@ -667,9 +668,12 @@ class VariableSubclassobjects(NamedArray
         )
         v = self.cls("x", data)
         print(v)  # should not error
-        if "America/New_York" in str(data.dtype):
-            # pandas is new enough that it has datetime64 with timezone dtype
-            assert v.dtype == "object"
+        if v.dtype == np.dtype("O"):
+            import dask.array as da
+
+            assert isinstance(v.data, da.Array)
+        else:
+            assert v.dtype == data.dtype
 
     def test_multiindex(self):
         idx = pd.MultiIndex.from_product([list("abc"), [0, 1]])
@@ -718,9 +722,9 @@ class VariableSubclassobjects(NamedArray
         assert_array_equal(v_new, expected)
 
         # with boolean variable with wrong shape
-        ind = np.array([True, False])
+        ind2: np.ndarray[Any, np.dtype[np.bool_]] = np.array([True, False])
         with pytest.raises(IndexError, match=r"Boolean array size 2 is "):
-            v[Variable(("a", "b"), [[0, 1]]), ind]
+            v[Variable(("a", "b"), [[0, 1]]), ind2]
 
         # boolean indexing with different dimension
         ind = Variable(["a"], [True, False, False])
@@ -1068,11 +1072,11 @@ class TestVariable(VariableSubclassobjec
 
     def test_numpy_same_methods(self):
         v = Variable([], np.float32(0.0))
-        assert v.item() == 0
-        assert type(v.item()) is float
+        assert v.item() == 0  # type: ignore[attr-defined]
+        assert type(v.item()) is float  # type: ignore[attr-defined]
 
         v = IndexVariable("x", np.arange(5))
-        assert 2 == v.searchsorted(2)
+        assert 2 == v.searchsorted(2)  # type: ignore[attr-defined]
 
     @pytest.mark.parametrize(
         "values, unit",
@@ -1124,7 +1128,7 @@ class TestVariable(VariableSubclassobjec
         v = Variable([], pd.Timestamp("2000-01-01"))
         expected_unit = "s" if has_pandas_3 else "ns"
         assert v.dtype == np.dtype(f"datetime64[{expected_unit}]")
-        assert v.values == np.datetime64("2000-01-01", expected_unit)
+        assert v.values == np.datetime64("2000-01-01", expected_unit)  # type: ignore[call-overload]
 
     @pytest.mark.parametrize(
         "values, unit", [(pd.to_timedelta("1s"), "ns"), (np.timedelta64(1, "s"), "s")]
@@ -1235,10 +1239,12 @@ class TestVariable(VariableSubclassobjec
         expected = Variable([], 0)
         assert_identical(expected, actual)
 
-        data = np.arange(9).reshape((3, 3))
-        expected = Variable(("x", "y"), data)
+        data2: np.ndarray[tuple[int, int], np.dtype[np.signedinteger[Any]]] = np.arange(
+            9
+        ).reshape((3, 3))
+        expected = Variable(("x", "y"), data2)
         with pytest.raises(ValueError, match=r"without explicit dimension names"):
-            as_variable(data, name="x")
+            as_variable(data2, name="x")
 
         # name of nD variable matches dimension name
         actual = as_variable(expected, name="x")
@@ -1319,7 +1325,7 @@ class TestVariable(VariableSubclassobjec
         v = Variable(["x", "y"], data)
 
         def assert_indexer_type(key, object_type):
-            dims, index_tuple, new_order = v._broadcast_indexes(key)
+            _dims, index_tuple, _new_order = v._broadcast_indexes(key)
             assert isinstance(index_tuple, object_type)
 
         # should return BasicIndexer
@@ -1397,11 +1403,11 @@ class TestVariable(VariableSubclassobjec
         # list arguments
         v_new = v[[0]]
         assert v_new.dims == ("x", "y")
-        assert_array_equal(v_new, v._data[[0]])
+        assert_array_equal(v_new, v._data[[0]])  # type: ignore[call-overload]
 
         v_new = v[[]]
         assert v_new.dims == ("x", "y")
-        assert_array_equal(v_new, v._data[[]])
+        assert_array_equal(v_new, v._data[[]])  # type: ignore[call-overload]
 
         # dict arguments
         v_new = v[dict(x=0)]
@@ -1590,15 +1596,7 @@ class TestVariable(VariableSubclassobjec
         data = pd.Categorical(np.arange(10, dtype="int64"))
         v = self.cls("x", data)
         print(v)  # should not error
-        assert pd.api.types.is_extension_array_dtype(v.dtype)
-
-    def test_pandas_categorical_no_chunk(self):
-        data = pd.Categorical(np.arange(10, dtype="int64"))
-        v = self.cls("x", data)
-        with pytest.raises(
-            ValueError, match=r".*was found to be a Pandas ExtensionArray.*"
-        ):
-            v.chunk((5,))
+        assert isinstance(v.dtype, pd.CategoricalDtype)
 
     def test_squeeze(self):
         v = Variable(["x", "y"], [[1]])
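
The reworked test_pandas_categorical_dtype above asserts that a Variable built from a pandas Categorical now reports the pandas CategoricalDtype rather than the underlying integer codes' dtype. A small illustration, assuming the 2025.10.1 behaviour shown in the test:

import numpy as np
import pandas as pd
from xarray import Variable

data = pd.Categorical(np.arange(10, dtype="int64"))
v = Variable("x", data)
print(v.dtype)                                   # category
assert isinstance(v.dtype, pd.CategoricalDtype)  # no longer plain int64
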
@@ -1658,6 +1656,74 @@ class TestVariable(VariableSubclassobjec
         expected = Variable(["x"], exp_values)
         assert_identical(actual, expected)
 
+    def test_set_dims_without_broadcast(self):
+        class ArrayWithoutBroadcastTo(NDArrayMixin, indexing.ExplicitlyIndexed):
+            def __init__(self, array):
+                self.array = array
+
+            # Broadcasting with __getitem__ is "easier" to implement
+            # especially for dims of 1
+            def __getitem__(self, key):
+                return self.array[key]
+
+            def __array_function__(self, *args, **kwargs):
+                raise NotImplementedError(
+                    "Note: we don't want to use broadcast_to here "
+                    "https://github.com/pydata/xarray/issues/9462"
+                )
+
+        arr = ArrayWithoutBroadcastTo(np.zeros((3, 4)))
+        # We should be able to add a new axis without broadcasting
+        assert arr[np.newaxis, :, :].shape == (1, 3, 4)
+        with pytest.raises(NotImplementedError):
+            np.broadcast_to(arr, (1, 3, 4))
+
+        v = Variable(["x", "y"], arr)
+        v_expanded = v.set_dims(["z", "x", "y"])
+        assert v_expanded.dims == ("z", "x", "y")
+        assert v_expanded.shape == (1, 3, 4)
+
+        v_expanded = v.set_dims(["x", "z", "y"])
+        assert v_expanded.dims == ("x", "z", "y")
+        assert v_expanded.shape == (3, 1, 4)
+
+        v_expanded = v.set_dims(["x", "y", "z"])
+        assert v_expanded.dims == ("x", "y", "z")
+        assert v_expanded.shape == (3, 4, 1)
+
+        # Explicitly asking for a shape of 1 triggers a different
+        # codepath in set_dims
+        # https://github.com/pydata/xarray/issues/9462
+        v_expanded = v.set_dims(["z", "x", "y"], shape=(1, 3, 4))
+        assert v_expanded.dims == ("z", "x", "y")
+        assert v_expanded.shape == (1, 3, 4)
+
+        v_expanded = v.set_dims(["x", "z", "y"], shape=(3, 1, 4))
+        assert v_expanded.dims == ("x", "z", "y")
+        assert v_expanded.shape == (3, 1, 4)
+
+        v_expanded = v.set_dims(["x", "y", "z"], shape=(3, 4, 1))
+        assert v_expanded.dims == ("x", "y", "z")
+        assert v_expanded.shape == (3, 4, 1)
+
+        v_expanded = v.set_dims({"z": 1, "x": 3, "y": 4})
+        assert v_expanded.dims == ("z", "x", "y")
+        assert v_expanded.shape == (1, 3, 4)
+
+        v_expanded = v.set_dims({"x": 3, "z": 1, "y": 4})
+        assert v_expanded.dims == ("x", "z", "y")
+        assert v_expanded.shape == (3, 1, 4)
+
+        v_expanded = v.set_dims({"x": 3, "y": 4, "z": 1})
+        assert v_expanded.dims == ("x", "y", "z")
+        assert v_expanded.shape == (3, 4, 1)
+
+        with pytest.raises(NotImplementedError):
+            v.set_dims({"z": 2, "x": 3, "y": 4})
+
+        with pytest.raises(NotImplementedError):
+            v.set_dims(["z", "x", "y"], shape=(2, 3, 4))
+
     def test_stack(self):
         v = Variable(["x", "y"], [[0, 1], [2, 3]], {"foo": "bar"})
         actual = v.stack(z=("x", "y"))
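
test_set_dims_without_broadcast above exercises Variable.set_dims with duck arrays that do not implement broadcast_to: new length-1 dimensions are inserted via indexing instead. A short usage sketch based only on the calls made in that test:

import numpy as np
from xarray import Variable

v = Variable(("x", "y"), np.zeros((3, 4)))

# A dict of sizes places the new length-1 dimension at the requested position.
expanded = v.set_dims({"z": 1, "x": 3, "y": 4})
assert expanded.dims == ("z", "x", "y") and expanded.shape == (1, 3, 4)

# The list + explicit shape form is equivalent for size-1 insertions.
expanded = v.set_dims(["x", "z", "y"], shape=(3, 1, 4))
assert expanded.dims == ("x", "z", "y") and expanded.shape == (3, 1, 4)
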
@@ -1864,6 +1930,9 @@ class TestVariable(VariableSubclassobjec
 
         np.testing.assert_allclose(actual.values, expected)
 
+    @pytest.mark.filterwarnings(
+        "default:The `interpolation` argument to quantile was renamed to `method`:FutureWarning"
+    )
     @pytest.mark.parametrize("method", ["midpoint", "lower"])
     def test_quantile_interpolation_deprecation(self, method) -> None:
         v = Variable(["x", "y"], self.d)
@@ -2299,7 +2368,7 @@ class TestVariableWithDask(VariableSubcl
         assert blocked.chunks == ((3,), (4,))
         first_dask_name = blocked.data.name
 
-        blocked = unblocked.chunk(chunks=((2, 1), (2, 2)))
+        blocked = unblocked.chunk(chunks=((2, 1), (2, 2)))  # type: ignore[arg-type]
         assert blocked.chunks == ((2, 1), (2, 2))
         assert blocked.data.name != first_dask_name
 
@@ -2412,10 +2481,17 @@ class TestVariableWithDask(VariableSubcl
     def test_pad(self, mode, xr_arg, np_arg):
         super().test_pad(mode, xr_arg, np_arg)
 
+    @pytest.mark.skip(reason="dask doesn't support extension arrays")
+    def test_pandas_period_index(self):
+        super().test_pandas_period_index()
+
+    @pytest.mark.skip(reason="dask doesn't support extension arrays")
+    def test_pandas_datetime64_with_tz(self):
+        super().test_pandas_datetime64_with_tz()
+
+    @pytest.mark.skip(reason="dask doesn't support extension arrays")
     def test_pandas_categorical_dtype(self):
-        data = pd.Categorical(np.arange(10, dtype="int64"))
-        with pytest.raises(ValueError, match="was found to be a Pandas ExtensionArray"):
-            self.cls("x", data)
+        super().test_pandas_categorical_dtype()
 
 
 @requires_sparse
@@ -2444,7 +2520,8 @@ class TestIndexVariable(VariableSubclass
 
     def test_to_index_multiindex_level(self):
         midx = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=("one", "two"))
-        ds = Dataset(coords={"x": midx})
+        with pytest.warns(FutureWarning):
+            ds = Dataset(coords={"x": midx})
         assert ds.one.variable.to_index().equals(midx.get_level_values("one"))
 
     def test_multiindex_default_level_names(self):
@@ -2452,7 +2529,7 @@ class TestIndexVariable(VariableSubclass
         v = IndexVariable(["x"], midx, {"foo": "bar"})
         assert v.to_index().names == ("x_level_0", "x_level_1")
 
-    def test_data(self):
+    def test_data(self):  # type: ignore[override]
         x = IndexVariable("x", np.arange(3.0))
         assert isinstance(x._data, PandasIndexingAdapter)
         assert isinstance(x.data, np.ndarray)
@@ -2585,7 +2662,7 @@ class TestIndexVariable(VariableSubclass
 
     @pytest.mark.skip
     def test_coarsen_2d(self):
-        super().test_coarsen_2d()
+        super().test_coarsen_2d()  # type: ignore[misc]
 
     def test_to_index_variable_copy(self) -> None:
         # to_index_variable should return a copy
@@ -2606,7 +2683,7 @@ class TestAsCompatibleData(Generic[T_Duc
                 pd.date_range("2000-01-01", periods=3),
                 pd.date_range("2000-01-01", periods=3).values,
             ]:
-                x = t(data)
+                x = t(data)  # type: ignore[arg-type]
                 assert source_ndarray(x) is source_ndarray(as_compatible_data(x))
 
     def test_converted_types(self):
@@ -2622,51 +2699,51 @@ class TestAsCompatibleData(Generic[T_Duc
             assert np.asarray(input_array).dtype == actual.dtype
 
     def test_masked_array(self):
-        original = np.ma.MaskedArray(np.arange(5))
+        original: Any = np.ma.MaskedArray(np.arange(5))
         expected = np.arange(5)
-        actual = as_compatible_data(original)
+        actual: Any = as_compatible_data(original)
         assert_array_equal(expected, actual)
         assert np.dtype(int) == actual.dtype
 
-        original = np.ma.MaskedArray(np.arange(5), mask=4 * [False] + [True])
-        expected = np.arange(5.0)
-        expected[-1] = np.nan
-        actual = as_compatible_data(original)
-        assert_array_equal(expected, actual)
+        original1: Any = np.ma.MaskedArray(np.arange(5), mask=4 * [False] + [True])
+        expected1: Any = np.arange(5.0)
+        expected1[-1] = np.nan
+        actual = as_compatible_data(original1)
+        assert_array_equal(expected1, actual)
         assert np.dtype(float) == actual.dtype
 
-        original = np.ma.MaskedArray([1.0, 2.0], mask=[True, False])
-        original.flags.writeable = False
-        expected = [np.nan, 2.0]
-        actual = as_compatible_data(original)
-        assert_array_equal(expected, actual)
+        original2: Any = np.ma.MaskedArray([1.0, 2.0], mask=[True, False])
+        original2.flags.writeable = False
+        expected2: Any = [np.nan, 2.0]
+        actual = as_compatible_data(original2)
+        assert_array_equal(expected2, actual)
         assert np.dtype(float) == actual.dtype
 
         # GH2377
-        actual = Variable(dims=tuple(), data=np.ma.masked)
-        expected = Variable(dims=tuple(), data=np.nan)
-        assert_array_equal(expected, actual)
-        assert actual.dtype == expected.dtype
+        actual_var: Any = Variable(dims=tuple(), data=np.ma.masked)
+        expected_var = Variable(dims=tuple(), data=np.nan)
+        assert_array_equal(expected_var, actual_var)
+        assert actual_var.dtype == expected_var.dtype
 
     def test_datetime(self):
         expected = np.datetime64("2000-01-01")
-        actual = as_compatible_data(expected)
+        actual: Any = as_compatible_data(expected)
         assert expected == actual
         assert np.ndarray is type(actual)
         assert np.dtype("datetime64[s]") == actual.dtype
 
-        expected = np.array([np.datetime64("2000-01-01")])
-        actual = as_compatible_data(expected)
-        assert np.asarray(expected) == actual
+        expected_dt: Any = np.array([np.datetime64("2000-01-01")])
+        actual = as_compatible_data(expected_dt)
+        assert np.asarray(expected_dt) == actual
         assert np.ndarray is type(actual)
         assert np.dtype("datetime64[s]") == actual.dtype
 
-        expected = np.array([np.datetime64("2000-01-01", "ns")])
-        actual = as_compatible_data(expected)
-        assert np.asarray(expected) == actual
+        expected_dt_ns: Any = np.array([np.datetime64("2000-01-01", "ns")])
+        actual = as_compatible_data(expected_dt_ns)
+        assert np.asarray(expected_dt_ns) == actual
         assert np.ndarray is type(actual)
         assert np.dtype("datetime64[ns]") == actual.dtype
-        assert expected is source_ndarray(np.asarray(actual))
+        assert expected_dt_ns is source_ndarray(np.asarray(actual))
 
         expected = np.datetime64(
             "2000-01-01",
@@ -2704,11 +2781,11 @@ class TestAsCompatibleData(Generic[T_Duc
 
         expect = orig.copy(deep=True)
         # see https://github.com/python/mypy/issues/3004 for why we need to ignore type
-        expect.values = [[2.0, 2.0], [2.0, 2.0]]  # type: ignore[assignment]
+        expect.values = [[2.0, 2.0], [2.0, 2.0]]  # type: ignore[assignment,unused-ignore]
         assert_identical(expect, full_like(orig, 2))
 
         # override dtype
-        expect.values = [[True, True], [True, True]]  # type: ignore[assignment]
+        expect.values = [[True, True], [True, True]]  # type: ignore[assignment,unused-ignore]
         assert expect.dtype == bool
         assert_identical(expect, full_like(orig, True, dtype=bool))
 
@@ -2769,19 +2846,19 @@ class TestAsCompatibleData(Generic[T_Duc
         class SubclassedArray(np.ndarray):
             def __new__(cls, array, foo):
                 obj = np.asarray(array).view(cls)
-                obj.foo = foo
+                obj.foo = foo  # type: ignore[attr-defined]
                 return obj
 
         data = SubclassedArray([1, 2, 3], foo="bar")
-        actual = as_compatible_data(data)
+        actual: Any = as_compatible_data(data)
         assert isinstance(actual, SubclassedArray)
-        assert actual.foo == "bar"
+        assert actual.foo == "bar"  # type: ignore[attr-defined]
         assert_array_equal(data, actual)
 
     def test_numpy_matrix(self):
         with pytest.warns(PendingDeprecationWarning):
             data = np.matrix([[1, 2], [3, 4]])
-        actual = as_compatible_data(data)
+        actual: Any = as_compatible_data(data)
         assert isinstance(actual, np.ndarray)
         assert_array_equal(data, actual)
 
@@ -2807,9 +2884,9 @@ class TestAsCompatibleData(Generic[T_Duc
         orig = Variable(dims=("x"), data=array, attrs={"foo": "bar"})
         assert isinstance(orig._data, CustomIndexable)
 
-        array = CustomWithValuesAttr(np.arange(3))
-        orig = Variable(dims=(), data=array)
-        assert isinstance(orig._data.item(), CustomWithValuesAttr)
+        array2: Any = CustomWithValuesAttr(np.arange(3))
+        orig = Variable(dims=(), data=array2)
+        assert isinstance(orig._data.item(), CustomWithValuesAttr)  # type: ignore[union-attr]
 
 
 def test_raise_no_warning_for_nan_in_binary_ops():
@@ -2823,71 +2900,107 @@ class TestBackendIndexing:
     @pytest.fixture(autouse=True)
     def setUp(self):
         self.d = np.random.random((10, 3)).astype(np.float64)
+        self.cat = PandasExtensionArray(pd.Categorical(["a", "b"] * 5))
 
-    def check_orthogonal_indexing(self, v):
-        assert np.allclose(v.isel(x=[8, 3], y=[2, 1]), self.d[[8, 3]][:, [2, 1]])
+    async def check_orthogonal_indexing(self, v, load_async):
+        expected = self.d[[8, 3]][:, [2, 1]]
+
+        if load_async:
+            result = await v.isel(x=[8, 3], y=[2, 1]).load_async()
+        else:
+            result = v.isel(x=[8, 3], y=[2, 1])
 
-    def check_vectorized_indexing(self, v):
+        assert np.allclose(result, expected)
+
+    async def check_vectorized_indexing(self, v, load_async):
         ind_x = Variable("z", [0, 2])
         ind_y = Variable("z", [2, 1])
-        assert np.allclose(v.isel(x=ind_x, y=ind_y), self.d[ind_x, ind_y])
+        expected = self.d[ind_x, ind_y]
+
+        if load_async:
+            result = await v.isel(x=ind_x, y=ind_y).load_async()
+        else:
+            result = v.isel(x=ind_x, y=ind_y).load()
+
+        assert np.allclose(result, expected)
 
-    def test_NumpyIndexingAdapter(self):
+    @pytest.mark.asyncio
+    @pytest.mark.parametrize("load_async", [True, False])
+    async def test_NumpyIndexingAdapter(self, load_async):
         v = Variable(dims=("x", "y"), data=NumpyIndexingAdapter(self.d))
-        self.check_orthogonal_indexing(v)
-        self.check_vectorized_indexing(v)
+        await self.check_orthogonal_indexing(v, load_async)
+        await self.check_vectorized_indexing(v, load_async)
         # doubly wrapping is not allowed
         with pytest.raises(TypeError, match=r"NumpyIndexingAdapter only wraps "):
             v = Variable(
                 dims=("x", "y"), data=NumpyIndexingAdapter(NumpyIndexingAdapter(self.d))
             )
 
-    def test_LazilyIndexedArray(self):
+    def test_extension_array_duck_array(self):
+        lazy = LazilyIndexedArray(self.cat)
+        assert (lazy.get_duck_array().array == self.cat).all()
+
+    def test_extension_array_duck_indexed(self):
+        lazy = Variable(dims=("x"), data=LazilyIndexedArray(self.cat))
+        assert (lazy[[0, 1, 5]] == ["a", "b", "b"]).all()
+
+    @pytest.mark.asyncio
+    @pytest.mark.parametrize("load_async", [True, False])
+    async def test_LazilyIndexedArray(self, load_async):
         v = Variable(dims=("x", "y"), data=LazilyIndexedArray(self.d))
-        self.check_orthogonal_indexing(v)
-        self.check_vectorized_indexing(v)
+        await self.check_orthogonal_indexing(v, load_async)
+        await self.check_vectorized_indexing(v, load_async)
         # doubly wrapping
         v = Variable(
             dims=("x", "y"),
             data=LazilyIndexedArray(LazilyIndexedArray(self.d)),
         )
-        self.check_orthogonal_indexing(v)
+        await self.check_orthogonal_indexing(v, load_async)
         # hierarchical wrapping
         v = Variable(
             dims=("x", "y"), data=LazilyIndexedArray(NumpyIndexingAdapter(self.d))
         )
-        self.check_orthogonal_indexing(v)
+        await self.check_orthogonal_indexing(v, load_async)
 
-    def test_CopyOnWriteArray(self):
+    @pytest.mark.asyncio
+    @pytest.mark.parametrize("load_async", [True, False])
+    async def test_CopyOnWriteArray(self, load_async):
         v = Variable(dims=("x", "y"), data=CopyOnWriteArray(self.d))
-        self.check_orthogonal_indexing(v)
-        self.check_vectorized_indexing(v)
+        await self.check_orthogonal_indexing(v, load_async)
+        await self.check_vectorized_indexing(v, load_async)
         # doubly wrapping
-        v = Variable(dims=("x", "y"), data=CopyOnWriteArray(LazilyIndexedArray(self.d)))
-        self.check_orthogonal_indexing(v)
-        self.check_vectorized_indexing(v)
-
-    def test_MemoryCachedArray(self):
+        v = Variable(dims=("x", "y"), data=CopyOnWriteArray(LazilyIndexedArray(self.d)))  # type: ignore[arg-type]
+        await self.check_orthogonal_indexing(v, load_async)
+        await self.check_vectorized_indexing(v, load_async)
+
+    @pytest.mark.asyncio
+    @pytest.mark.parametrize("load_async", [True, False])
+    async def test_MemoryCachedArray(self, load_async):
         v = Variable(dims=("x", "y"), data=MemoryCachedArray(self.d))
-        self.check_orthogonal_indexing(v)
-        self.check_vectorized_indexing(v)
+        await self.check_orthogonal_indexing(v, load_async)
+        await self.check_vectorized_indexing(v, load_async)
         # doubly wrapping
-        v = Variable(dims=("x", "y"), data=CopyOnWriteArray(MemoryCachedArray(self.d)))
-        self.check_orthogonal_indexing(v)
-        self.check_vectorized_indexing(v)
+        v = Variable(dims=("x", "y"), data=CopyOnWriteArray(MemoryCachedArray(self.d)))  # type: ignore[arg-type]
+        await self.check_orthogonal_indexing(v, load_async)
+        await self.check_vectorized_indexing(v, load_async)
 
     @requires_dask
-    def test_DaskIndexingAdapter(self):
+    @pytest.mark.asyncio
+    @pytest.mark.parametrize("load_async", [True, False])
+    async def test_DaskIndexingAdapter(self, load_async):
         import dask.array as da
 
-        da = da.asarray(self.d)
-        v = Variable(dims=("x", "y"), data=DaskIndexingAdapter(da))
-        self.check_orthogonal_indexing(v)
-        self.check_vectorized_indexing(v)
+        dask_array = da.asarray(self.d)
+        v = Variable(dims=("x", "y"), data=DaskIndexingAdapter(dask_array))
+        await self.check_orthogonal_indexing(v, load_async)
+        await self.check_vectorized_indexing(v, load_async)
         # doubly wrapping
-        v = Variable(dims=("x", "y"), data=CopyOnWriteArray(DaskIndexingAdapter(da)))
-        self.check_orthogonal_indexing(v)
-        self.check_vectorized_indexing(v)
+        v = Variable(
+            dims=("x", "y"),
+            data=CopyOnWriteArray(DaskIndexingAdapter(dask_array)),  # type: ignore[arg-type]
+        )
+        await self.check_orthogonal_indexing(v, load_async)
+        await self.check_vectorized_indexing(v, load_async)
 
 
 def test_clip(var):
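
The TestBackendIndexing changes above parametrize every indexing-adapter test over synchronous and asynchronous loading. A hedged sketch of the async path, assuming load_async() is awaitable on lazily wrapped Variables exactly as these tests exercise it:

import asyncio

import numpy as np
from xarray import Variable
from xarray.core.indexing import LazilyIndexedArray

async def main() -> None:
    data = np.random.random((10, 3))
    v = Variable(("x", "y"), LazilyIndexedArray(data))
    # Orthogonal indexing followed by an awaited load, mirroring check_orthogonal_indexing.
    loaded = await v.isel(x=[8, 3], y=[2, 1]).load_async()
    assert np.allclose(loaded, data[[8, 3]][:, [2, 1]])

asyncio.run(main())
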
@@ -3020,7 +3133,7 @@ def test_datetime_conversion(values, uni
     # todo: check for redundancy (suggested per review)
     dims = ["time"] if isinstance(values, np.ndarray | pd.Index | pd.Series) else []
     var = Variable(dims, values)
-    if var.dtype.kind == "M":
+    if var.dtype.kind == "M" and isinstance(var.dtype, np.dtype):
         assert var.dtype == np.dtype(f"datetime64[{unit}]")
     else:
         # The only case where a non-datetime64 dtype can occur currently is in
@@ -3062,8 +3175,12 @@ def test_pandas_two_only_datetime_conver
     # todo: check for redundancy (suggested per review)
     var = Variable(["time"], data.astype(dtype))  # type: ignore[arg-type]
 
-    if var.dtype.kind == "M":
+    # We internally convert Series to their numpy representation to avoid
+    # extension-array complications (e.g. NumpyExtensionArray from data.array).
+    if isinstance(data, pd.Series):
         assert var.dtype == np.dtype("datetime64[s]")
+    elif var.dtype.kind == "M":
+        assert var.dtype == dtype
     else:
         # The only case where a non-datetime64 dtype can occur currently is in
         # the case that the variable is backed by a timezone-aware
diff -pruN 2025.03.1-8/xarray/tests/test_weighted.py 2025.10.1-1/xarray/tests/test_weighted.py
--- 2025.03.1-8/xarray/tests/test_weighted.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tests/test_weighted.py	2025-10-10 10:38:05.000000000 +0000
@@ -34,7 +34,7 @@ def test_weighted_weights_nan_raises(as_
     if as_dataset:
         data = data.to_dataset(name="data")
 
-    with pytest.raises(ValueError, match="`weights` cannot contain missing values."):
+    with pytest.raises(ValueError, match=r"`weights` cannot contain missing values."):
         data.weighted(DataArray(weights))
 
 
@@ -42,7 +42,7 @@ def test_weighted_weights_nan_raises(as_
 @pytest.mark.parametrize("as_dataset", (True, False))
 @pytest.mark.parametrize("weights", ([np.nan, 2], [np.nan, np.nan]))
 def test_weighted_weights_nan_raises_dask(as_dataset, weights):
-    data = DataArray([1, 2]).chunk({"dim_0": -1})
+    data: DataArray | Dataset = DataArray([1, 2]).chunk({"dim_0": -1})
     if as_dataset:
         data = data.to_dataset(name="data")
 
@@ -51,7 +51,7 @@ def test_weighted_weights_nan_raises_das
     with raise_if_dask_computes():
         weighted = data.weighted(weights)
 
-    with pytest.raises(ValueError, match="`weights` cannot contain missing values."):
+    with pytest.raises(ValueError, match=r"`weights` cannot contain missing values."):
         weighted.sum().load()
 
 
@@ -603,19 +603,21 @@ def test_weighted_operations_3D(dim, add
 
     weights = DataArray(np.random.randn(4, 4, 4), dims=dims, coords=coords)
 
-    data = np.random.randn(4, 4, 4)
+    data_values = np.random.randn(4, 4, 4)
 
     # add approximately 25 % NaNs (https://stackoverflow.com/a/32182680/3010700)
     if add_nans:
-        c = int(data.size * 0.25)
-        data.ravel()[np.random.choice(data.size, c, replace=False)] = np.nan
+        c = int(data_values.size * 0.25)
+        data_values.ravel()[np.random.choice(data_values.size, c, replace=False)] = (
+            np.nan
+        )
 
-    data = DataArray(data, dims=dims, coords=coords)
+    data = DataArray(data_values, dims=dims, coords=coords)
 
     check_weighted_operations(data, weights, dim, skipna)
 
-    data = data.to_dataset(name="data")
-    check_weighted_operations(data, weights, dim, skipna)
+    ds = data.to_dataset(name="data")
+    check_weighted_operations(ds, weights, dim, skipna)
 
 
 @pytest.mark.parametrize("dim", ("a", "b", "c", ("a", "b"), ("a", "b", "c"), None))
@@ -704,21 +706,23 @@ def test_weighted_operations_different_s
 ):
     weights = DataArray(np.random.randn(*shape_weights))
 
-    data = np.random.randn(*shape_data)
+    data_values = np.random.randn(*shape_data)
 
     # add approximately 25 % NaNs
     if add_nans:
-        c = int(data.size * 0.25)
-        data.ravel()[np.random.choice(data.size, c, replace=False)] = np.nan
+        c = int(data_values.size * 0.25)
+        data_values.ravel()[np.random.choice(data_values.size, c, replace=False)] = (
+            np.nan
+        )
 
-    data = DataArray(data)
+    data = DataArray(data_values)
 
     check_weighted_operations(data, weights, "dim_0", skipna)
     check_weighted_operations(data, weights, None, skipna)
 
-    data = data.to_dataset(name="data")
-    check_weighted_operations(data, weights, "dim_0", skipna)
-    check_weighted_operations(data, weights, None, skipna)
+    ds = data.to_dataset(name="data")
+    check_weighted_operations(ds, weights, "dim_0", skipna)
+    check_weighted_operations(ds, weights, None, skipna)
 
 
 @pytest.mark.parametrize(
@@ -729,7 +733,7 @@ def test_weighted_operations_different_s
 @pytest.mark.parametrize("keep_attrs", (True, False, None))
 def test_weighted_operations_keep_attr(operation, as_dataset, keep_attrs):
     weights = DataArray(np.random.randn(2, 2), attrs=dict(attr="weights"))
-    data = DataArray(np.random.randn(2, 2))
+    data: DataArray | Dataset = DataArray(np.random.randn(2, 2))
 
     if as_dataset:
         data = data.to_dataset(name="data")
@@ -758,10 +762,10 @@ def test_weighted_operations_keep_attr_d
     # GH #3595
 
     weights = DataArray(np.random.randn(2, 2))
-    data = DataArray(np.random.randn(2, 2), attrs=dict(attr="data"))
-    data = data.to_dataset(name="a")
+    da = DataArray(np.random.randn(2, 2), attrs=dict(attr="data"))
+    data = da.to_dataset(name="a")
 
-    kwargs = {"keep_attrs": True}
+    kwargs: dict[str, Any] = {"keep_attrs": True}
     if operation == "quantile":
         kwargs["q"] = 0.5
 
@@ -770,15 +774,27 @@ def test_weighted_operations_keep_attr_d
     assert data.a.attrs == result.a.attrs
 
 
+def test_weighted_mean_keep_attrs_ds():
+    weights = DataArray(np.random.randn(2))
+    data = Dataset(
+        {"a": (["dim_0", "dim_1"], np.random.randn(2, 2), dict(attr="data"))},
+        coords={"dim_1": ("dim_1", ["a", "b"], {"attr1": "value1"})},
+    )
+
+    result = data.weighted(weights).mean(dim="dim_0", keep_attrs=True)
+    assert data.coords["dim_1"].attrs == result.coords["dim_1"].attrs
+
+
 @pytest.mark.parametrize("operation", ("sum_of_weights", "sum", "mean", "quantile"))
 @pytest.mark.parametrize("as_dataset", (True, False))
 def test_weighted_bad_dim(operation, as_dataset):
-    data = DataArray(np.random.randn(2, 2))
-    weights = xr.ones_like(data)
+    data_array = DataArray(np.random.randn(2, 2))
+    weights = xr.ones_like(data_array)
+    data: DataArray | Dataset = data_array
     if as_dataset:
-        data = data.to_dataset(name="data")
+        data = data_array.to_dataset(name="data")
 
-    kwargs = {"dim": "bad_dim"}
+    kwargs: dict[str, Any] = {"dim": "bad_dim"}
     if operation == "quantile":
         kwargs["q"] = 0.5
 
diff -pruN 2025.03.1-8/xarray/tutorial.py 2025.10.1-1/xarray/tutorial.py
--- 2025.03.1-8/xarray/tutorial.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/tutorial.py	2025-10-10 10:38:05.000000000 +0000
@@ -85,7 +85,7 @@ def _check_netcdf_engine_installed(name)
 def open_dataset(
     name: str,
     cache: bool = True,
-    cache_dir: None | str | os.PathLike = None,
+    cache_dir: str | os.PathLike | None = None,
     *,
     engine: T_Engine = None,
     **kws,
@@ -216,7 +216,7 @@ def load_dataset(*args, **kwargs) -> Dat
         return ds.load()
 
 
-def scatter_example_dataset(*, seed: None | int = None) -> Dataset:
+def scatter_example_dataset(*, seed: int | None = None) -> Dataset:
     """
     Create an example dataset.
 
@@ -255,7 +255,7 @@ def scatter_example_dataset(*, seed: Non
 def open_datatree(
     name: str,
     cache: bool = True,
-    cache_dir: None | str | os.PathLike = None,
+    cache_dir: str | os.PathLike | None = None,
     *,
     engine: T_Engine = None,
     **kws,
diff -pruN 2025.03.1-8/xarray/typing.py 2025.10.1-1/xarray/typing.py
--- 2025.03.1-8/xarray/typing.py	1970-01-01 00:00:00.000000000 +0000
+++ 2025.10.1-1/xarray/typing.py	2025-10-10 10:38:05.000000000 +0000
@@ -0,0 +1,23 @@
+"""
+Public typing utilities for use by external libraries.
+"""
+
+from xarray.computation.rolling import (
+    DataArrayCoarsen,
+    DataArrayRolling,
+    DatasetRolling,
+)
+from xarray.computation.weighted import DataArrayWeighted, DatasetWeighted, Weighted
+from xarray.core.groupby import DataArrayGroupBy
+from xarray.core.resample import DataArrayResample
+
+__all__ = [
+    "DataArrayCoarsen",
+    "DataArrayGroupBy",
+    "DataArrayResample",
+    "DataArrayRolling",
+    "DataArrayWeighted",
+    "DatasetRolling",
+    "DatasetWeighted",
+    "Weighted",
+]
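
The new xarray.typing module re-exports accessor classes so downstream code can annotate against them without importing private paths. A hedged sketch of how an external library might use it; smooth() is a made-up example function:

import xarray as xr
from xarray.typing import DataArrayRolling

def smooth(rolling: DataArrayRolling) -> xr.DataArray:
    # Accepts whatever DataArray.rolling(...) returns.
    return rolling.mean()

da = xr.DataArray([1.0, 2.0, 3.0, 4.0], dims="x")
print(smooth(da.rolling(x=2)))
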
diff -pruN 2025.03.1-8/xarray/ufuncs.py 2025.10.1-1/xarray/ufuncs.py
--- 2025.03.1-8/xarray/ufuncs.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/ufuncs.py	2025-10-10 10:38:05.000000000 +0000
@@ -39,7 +39,7 @@ def get_array_namespace(*args):
         names = [module.__name__ for module in xps]
         raise ValueError(f"Mixed array types {names} are not supported.")
 
-    return next(iter(xps)) if len(xps) else np
+    return next(iter(xps)) if xps else np
 
 
 class _ufunc_wrapper(ABC):
diff -pruN 2025.03.1-8/xarray/util/deprecation_helpers.py 2025.10.1-1/xarray/util/deprecation_helpers.py
--- 2025.03.1-8/xarray/util/deprecation_helpers.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/util/deprecation_helpers.py	2025-10-10 10:38:05.000000000 +0000
@@ -35,8 +35,9 @@ import inspect
 import warnings
 from collections.abc import Callable
 from functools import wraps
-from typing import TypeVar
+from typing import Any, Self, TypeVar
 
+from xarray.core.options import OPTIONS
 from xarray.core.utils import emit_user_level_warning
 
 T = TypeVar("T", bound=Callable)
@@ -145,3 +146,76 @@ def deprecate_dims(func: T, old_name="di
     # We're quite confident we're just returning `T` from this function, so it's fine to ignore typing
     # within the function.
     return wrapper  # type: ignore[return-value]
+
+
+class CombineKwargDefault:
+    """Object that handles the deprecation cycle for kwarg default values.
+
+    Similar to ReprObject.
+    """
+
+    _old: str
+    _new: str | None
+    _name: str
+
+    def __init__(self, *, name: str, old: str, new: str | None):
+        self._name = name
+        self._old = old
+        self._new = new
+
+    def __repr__(self) -> str:
+        return str(self._value)
+
+    def __eq__(self, other: Self | Any) -> bool:
+        return (
+            self._value == other._value
+            if isinstance(other, type(self))
+            else self._value == other
+        )
+
+    @property
+    def _value(self) -> str | None:
+        return self._new if OPTIONS["use_new_combine_kwarg_defaults"] else self._old
+
+    def __hash__(self) -> int:
+        return hash(self._value)
+
+    def __dask_tokenize__(self) -> object:
+        from dask.base import normalize_token
+
+        return normalize_token((type(self), self._value))
+
+    def warning_message(self, message: str, recommend_set_options: bool = True) -> str:
+        if recommend_set_options:
+            recommendation = (
+                " To opt in to the new defaults and silence these warnings now, "
+                "use `set_options(use_new_combine_kwarg_defaults=True)` or "
+                f"set {self._name} explicitly."
+            )
+        else:
+            recommendation = (
+                f" The recommendation is to set {self._name} explicitly for this case."
+            )
+
+        return (
+            f"In a future version of xarray the default value for {self._name} will "
+            f"change from {self._name}={self._old!r} to {self._name}={self._new!r}. "
+            + message
+            + recommendation
+        )
+
+    def error_message(self) -> str:
+        return (
+            f" Error might be related to the new default (`{self._name}={self._new!r}`). "
+            f"Previously the default was `{self._name}={self._old!r}`. "
+            f"The recommendation is to set {self._name!r} explicitly for this case."
+        )
+
+
+_DATA_VARS_DEFAULT = CombineKwargDefault(name="data_vars", old="all", new=None)
+_COORDS_DEFAULT = CombineKwargDefault(name="coords", old="different", new="minimal")
+_COMPAT_CONCAT_DEFAULT = CombineKwargDefault(
+    name="compat", old="equals", new="override"
+)
+_COMPAT_DEFAULT = CombineKwargDefault(name="compat", old="no_conflicts", new="override")
+_JOIN_DEFAULT = CombineKwargDefault(name="join", old="outer", new="exact")
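
CombineKwargDefault acts as a sentinel default that resolves to the old or new value depending on the use_new_combine_kwarg_defaults option. A minimal sketch of that behaviour, assuming the option is registered with set_options as the warning message above suggests; _JOIN_DEFAULT is imported here only for illustration:

from xarray import set_options
from xarray.util.deprecation_helpers import _JOIN_DEFAULT

with set_options(use_new_combine_kwarg_defaults=False):
    assert _JOIN_DEFAULT == "outer"   # sentinel resolves to the old default

with set_options(use_new_combine_kwarg_defaults=True):
    assert _JOIN_DEFAULT == "exact"   # and to the new default once opted in
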
diff -pruN 2025.03.1-8/xarray/util/generate_aggregations.py 2025.10.1-1/xarray/util/generate_aggregations.py
--- 2025.03.1-8/xarray/util/generate_aggregations.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/util/generate_aggregations.py	2025-10-10 10:38:05.000000000 +0000
@@ -13,9 +13,9 @@ while replacing the doctests.
 
 """
 
-import collections
 import textwrap
 from dataclasses import dataclass, field
+from typing import NamedTuple
 
 MODULE_PREAMBLE = '''\
 """Mixin classes with reduction operations."""
@@ -215,7 +215,7 @@ _KWARGS_DOCSTRING = """**kwargs : Any
     function for calculating ``{method}`` on this object's data.
     These could include dask-specific kwargs like ``split_every``."""
 
-_NUMERIC_ONLY_NOTES = "Non-numeric variables will be removed prior to reducing."
+_NUMERIC_ONLY_NOTES = "Non-numeric variables will be removed prior to reducing. datetime64 and timedelta64 dtypes are treated as numeric for aggregation operations."
 
 _FLOX_NOTES_TEMPLATE = """Use the ``flox`` package to significantly speed up {kind} computations,
 especially with dask arrays. Xarray will use flox by default if installed.
@@ -227,7 +227,14 @@ _CUM_NOTES = """Note that the methods on
 and better supported. ``cumsum`` and ``cumprod`` may be deprecated
 in the future."""
 
-ExtraKwarg = collections.namedtuple("ExtraKwarg", "docs kwarg call example")
+
+class ExtraKwarg(NamedTuple):
+    docs: str
+    kwarg: str
+    call: str
+    example: str
+
+
 skipna = ExtraKwarg(
     docs=_SKIPNA_DOCSTRING,
     kwarg="skipna: bool | None = None,",
@@ -685,8 +692,7 @@ def write_methods(filepath, generators,
         f.write(preamble)
         for gen in generators:
             for lines in gen.generate_methods():
-                for line in lines:
-                    f.write(line + "\n")
+                f.writelines(line + "\n" for line in lines)
 
 
 if __name__ == "__main__":
diff -pruN 2025.03.1-8/xarray/util/generate_ops.py 2025.10.1-1/xarray/util/generate_ops.py
--- 2025.03.1-8/xarray/util/generate_ops.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/util/generate_ops.py	2025-10-10 10:38:05.000000000 +0000
@@ -133,7 +133,7 @@ unhashable = """
 # We require a "hack" to tell type checkers that e.g. Variable + DataArray = DataArray
 # In reality this returns NotImplemented, but this is not a valid type in python 3.9.
 # Therefore, we return DataArray. In reality this would call DataArray.__add__(Variable)
-# TODO: change once python 3.10 is the minimum.
+# TODO: change once python 3.11 is the minimum.
 #
 # Mypy seems to require that __iadd__ and __add__ have the same signature.
 # This requires some extra type: ignores[misc] in the inplace methods :/
@@ -222,32 +222,33 @@ def unops() -> list[OpsType]:
 # ruff does not reformat everything. When reformatting, the
 # type-ignores end up in the wrong line :/
 
-ops_info = {}
-# TODO add inplace ops for DataTree?
-ops_info["DataTreeOpsMixin"] = binops(other_type="DtCompatible") + unops()
-ops_info["DatasetOpsMixin"] = (
-    binops_overload(other_type="DsCompatible", overload_types=["DataTree"])
-    + inplace(other_type="DsCompatible", type_ignore="misc")
-    + unops()
-)
-ops_info["DataArrayOpsMixin"] = (
-    binops_overload(other_type="DaCompatible", overload_types=["Dataset", "DataTree"])
-    + inplace(other_type="DaCompatible", type_ignore="misc")
-    + unops()
-)
-ops_info["VariableOpsMixin"] = (
-    binops_overload(
-        other_type="VarCompatible", overload_types=["T_DA", "Dataset", "DataTree"]
-    )
-    + inplace(other_type="VarCompatible", type_ignore="misc")
-    + unops()
-)
-ops_info["DatasetGroupByOpsMixin"] = binops(
-    other_type="Dataset | DataArray", return_type="Dataset"
-)
-ops_info["DataArrayGroupByOpsMixin"] = binops(
-    other_type="T_Xarray", return_type="T_Xarray"
-)
+ops_info = {
+    # TODO add inplace ops for DataTree?
+    "DataTreeOpsMixin": binops(other_type="DtCompatible") + unops(),
+    "DatasetOpsMixin": (
+        binops_overload(other_type="DsCompatible", overload_types=["DataTree"])
+        + inplace(other_type="DsCompatible", type_ignore="misc")
+        + unops()
+    ),
+    "DataArrayOpsMixin": (
+        binops_overload(
+            other_type="DaCompatible", overload_types=["Dataset", "DataTree"]
+        )
+        + inplace(other_type="DaCompatible", type_ignore="misc")
+        + unops()
+    ),
+    "VariableOpsMixin": (
+        binops_overload(
+            other_type="VarCompatible", overload_types=["T_DA", "Dataset", "DataTree"]
+        )
+        + inplace(other_type="VarCompatible", type_ignore="misc")
+        + unops()
+    ),
+    "DatasetGroupByOpsMixin": binops(
+        other_type="Dataset | DataArray", return_type="Dataset"
+    ),
+    "DataArrayGroupByOpsMixin": binops(other_type="T_Xarray", return_type="T_Xarray"),
+}
 
 MODULE_PREAMBLE = '''\
 """Mixin classes with arithmetic operators."""
diff -pruN 2025.03.1-8/xarray/util/print_versions.py 2025.10.1-1/xarray/util/print_versions.py
--- 2025.03.1-8/xarray/util/print_versions.py	2025-04-04 11:41:24.000000000 +0000
+++ 2025.10.1-1/xarray/util/print_versions.py	2025-10-10 10:38:05.000000000 +0000
@@ -1,5 +1,6 @@
 """Utility functions for printing version information."""
 
+import contextlib
 import importlib
 import locale
 import os
@@ -19,7 +20,7 @@ def get_sys_info():
     if os.path.isdir(".git") and os.path.isdir("xarray"):
         try:
             pipe = subprocess.Popen(
-                'git log --format="%H" -n 1'.split(" "),
+                ("git", "log", '--format="%H"', "-n", "1"),
                 stdout=subprocess.PIPE,
                 stderr=subprocess.PIPE,
             )
@@ -29,10 +30,8 @@ def get_sys_info():
         else:
             if pipe.returncode == 0:
                 commit = so
-                try:
+                with contextlib.suppress(ValueError):
                     commit = so.decode("utf-8")
-                except ValueError:
-                    pass
                 commit = commit.strip().strip('"')
 
     blob.append(("commit", commit))

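
The print_versions changes above pass the git arguments as a tuple (avoiding shell-style string splitting) and replace a try/except-pass with contextlib.suppress. A standalone illustration of the suppress pattern, using placeholder bytes rather than real git output:

import contextlib

raw = b'"abc123"\n'   # placeholder for the bytes returned by git
commit = raw
with contextlib.suppress(ValueError):
    # Equivalent to the try/except ValueError: pass block it replaces.
    commit = raw.decode("utf-8")
print(commit.strip().strip('"'))  # abc123
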